summaryrefslogtreecommitdiffstats
path: root/web/gui/dashboard_info.js
diff options
context:
space:
mode:
Diffstat (limited to 'web/gui/dashboard_info.js')
-rw-r--r--web/gui/dashboard_info.js617
1 files changed, 465 insertions, 152 deletions
diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js
index db60fd813..11cfda54a 100644
--- a/web/gui/dashboard_info.js
+++ b/web/gui/dashboard_info.js
@@ -291,12 +291,24 @@ netdataDashboard.menu = {
info: 'QEMU virtual machine resource utilization metrics. QEMU (short for Quick Emulator) is a free and open-source hosted hypervisor that performs hardware virtualization.'
},
+ 'docker': {
+ title: 'Docker',
+ icon: '<i class="fas fa-cube"></i>',
+ info: 'Docker containers\' state and disk usage.'
+ },
+
'fping': {
title: 'fping',
icon: '<i class="fas fa-exchange-alt"></i>',
info: 'Network latency statistics, via <b>fping</b>. <b>fping</b> is a program to send ICMP echo probes to network hosts, similar to <code>ping</code>, but much better performing when pinging multiple hosts. fping versions after 3.15 can be directly used as netdata plugins.'
},
+ 'ping': {
+ title: 'Ping',
+ icon: '<i class="fas fa-exchange-alt"></i>',
+ info: 'Measures round-trip time and packet loss by sending ping messages to network hosts.'
+ },
+
'gearman': {
title: 'Gearman',
icon: '<i class="fas fa-tasks"></i>',
@@ -333,12 +345,24 @@ netdataDashboard.menu = {
info: 'Performance metrics for <b>mysql</b>, the open-source relational database management system (RDBMS).'
},
+ 'nvme': {
+ title: 'NVMe',
+ icon: '<i class="fas fa-hdd"></i>',
+ info: 'NVMe devices SMART and health metrics. Additional information on metrics can be found in the <a href="https://nvmexpress.org/developers/nvme-specification/" target="_blank">NVM Express Base Specification</a>.'
+ },
+
'postgres': {
title: 'PostgreSQL',
icon: '<i class="fas fa-database"></i>',
info: 'Performance metrics for <b>PostgreSQL</b>, the open source object-relational database management system (ORDBMS).'
},
+ 'proxysql': {
+ title: 'ProxySQL',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance metrics for <b>ProxySQL</b>, a high-performance open-source MySQL proxy.'
+ },
+
'pgbouncer': {
title: 'PgBouncer',
icon: '<i class="fas fa-exchange-alt"></i>',
@@ -418,6 +442,12 @@ netdataDashboard.menu = {
info: undefined
},
+ 'nginxplus': {
+ title: 'Nginx Plus',
+ icon: '<i class="fas fa-eye"></i>',
+ info: undefined
+ },
+
'apache': {
title: 'Apache',
icon: '<i class="fas fa-eye"></i>',
@@ -528,7 +558,7 @@ netdataDashboard.menu = {
'logind': {
title: 'Logind',
icon: '<i class="fas fa-user"></i>',
- info: undefined
+ info: 'Keeps track of user logins and sessions by querying the <a href="https://www.freedesktop.org/software/systemd/man/org.freedesktop.login1.html" target="_blank">systemd-logind API</a>.'
},
'powersupply': {
@@ -699,6 +729,16 @@ netdataDashboard.menu = {
icon: '<i class="fas fa-dragon"></i>',
info: 'VPN network interfaces and peers traffic.'
},
+
+ 'pandas': {
+ icon: '<i class="fas fa-teddy-bear"></i>'
+ },
+
+ 'cassandra': {
+ title: 'Cassandra',
+ icon: '<i class="fas fa-database"></i>',
+ info: 'Performance metrics for Cassandra, the open source distributed NoSQL database management system.'
+ }
};
@@ -1083,6 +1123,11 @@ netdataDashboard.submenu = {
'IRQ (<a href="#ebpf_global_hard_irq">Hard IRQ</a> and <a href="#ebpf_global_soft_irq">Soft IRQ</a> ), <a href="#ebpf_global_shm">Shared Memory</a>, ' +
'Syscalls (<a href="#menu_mem_submenu_synchronization__eBPF_">Sync</a>, <a href="#menu_mount_submenu_mount__eBPF_">Mount</a>), and <a href="#menu_ip_submenu_kernel">Network</a>.'
},
+
+ 'postgres.connections': {
+ info: 'A connection is an established line of communication between a client and the PostgreSQL server. Each connection adds to the load on the PostgreSQL server. To guard against running out of memory or overloading the database the <i>max_connections</i> parameter (default = 100) defines the maximum number of concurrent connections to the database server. A separate parameter, <i>superuser_reserved_connections</i> (default = 3), defines the quota for superuser connections (so that superusers can connect even if all other connection slots are blocked).'
+ },
+
};
// ----------------------------------------------------------------------------
@@ -2676,7 +2721,7 @@ netdataDashboard.context = {
info: 'Real memory (RAM) used per user group. This does not include shared memory.'
},
'users.mem': {
- info: 'Real memory (RAM) used per user group. This does not include shared memory.'
+ info: 'Real memory (RAM) used per user. This does not include shared memory.'
},
'apps.vmem': {
@@ -2687,7 +2732,7 @@ netdataDashboard.context = {
info: 'Virtual memory allocated per user group since the Netdata restart. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
},
'users.vmem': {
- info: 'Virtual memory allocated per user group since the Netdata restart. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
+ info: 'Virtual memory allocated per user since the Netdata restart. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
},
'apps.minor_faults': {
@@ -3767,21 +3812,11 @@ netdataDashboard.context = {
},
'mysql.galera_cluster_status': {
- info:
- '<code>-1</code>: unknown, ' +
- '<code>0</code>: primary (primary group configuration, quorum present), ' +
- '<code>1</code>: non-primary (non-primary group configuration, quorum lost), ' +
- '<code>2</code>: disconnected(not connected to group, retrying).'
+ info: "<p>Status of this cluster component.</p><p><b>Primary</b> - primary group configuration, quorum present. <b>Non-Primary</b> - non-primary group configuration, quorum lost. <b>Disconnected</b> - not connected to group, retrying.</p>"
},
'mysql.galera_cluster_state': {
- info:
- '<code>0</code>: Undefined, ' +
- '<code>1</code>: Joining, ' +
- '<code>2</code>: Donor/Desynced, ' +
- '<code>3</code>: Joined, ' +
- '<code>4</code>: Synced, ' +
- '<code>5</code>: Inconsistent.'
+ info: "<p>Membership state of this cluster component.</p><p><b>Undefined</b> - undefined state. <b>Joining</b> - the node is attempting to join the cluster. <b>Donor</b> - the node has blocked itself while it sends a State Snapshot Transfer (SST) to bring a new node up to date with the cluster. <b>Joined</b> - the node has successfully joined the cluster. <b>Synced</b> - the node has established a connection with the cluster and synchronized its local databases with those of the cluster. <b>Error</b> - the node is not part of the cluster and does not replicate transactions. This state is provider-specific, check <i>wsrep_local_state_comment</i> variable for a description.</p>"
},
'mysql.galera_cluster_weight': {
@@ -3802,194 +3837,298 @@ netdataDashboard.context = {
// ------------------------------------------------------------------------
// POSTGRESQL
-
- // python version start
- 'postgres.db_stat_blks': {
- info: 'Blocks reads from disk or cache.<ul>' +
- '<li><strong>blks_read:</strong> number of disk blocks read in this database.</li>' +
- '<li><strong>blks_hit:</strong> number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system&#39;s file system cache)</li>' +
- '</ul>'
+ 'postgres.connections_utilization': {
+ room: {
+ mainheads: [
+ function (_, id) {
+ return '<div data-netdata="' + id + '"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Connections Utilization"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' role="application"></div>';
+ }
+ ],
+ },
+ info: '<b>Total connection utilization</b> across all databases. Utilization is measured as a percentage of (<i>max_connections</i> - <i>superuser_reserved_connections</i>). If the utilization is 100% no more new connections will be accepted (superuser connections will still be accepted if superuser quota is available).'
},
- 'postgres.db_stat_tuple_write': {
- info: '<ul><li>Number of rows inserted/updated/deleted.</li>' +
- '<li><strong>conflicts:</strong> number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see <a href="https://www.postgresql.org/docs/10/static/monitoring-stats.html#PG-STAT-DATABASE-CONFLICTS-VIEW" target="_blank">pg_stat_database_conflicts</a> for details.)</li>' +
- '</ul>'
+ 'postgres.connections_usage': {
+ info: '<p><b>Connections usage</b> across all databases. The maximum number of concurrent connections to the database server is (<i>max_connections</i> - <i>superuser_reserved_connections</i>). As a general rule, if you need more than 200 connections it is advisable to use connection pooling.</p><p><b>Available</b> - new connections allowed. <b>Used</b> - connections currently in use.</p>'
},
- 'postgres.db_stat_temp_bytes': {
- info: 'Temporary files can be created on disk for sorts, hashes, and temporary query results.'
+ 'postgres.connections_state_count': {
+ info: '<p>Number of connections in each state across all databases.</p><p><b>Active</b> - the backend is executing query. <b>Idle</b> - the backend is waiting for a new client command. <b>IdleInTransaction</b> - the backend is in a transaction, but is not currently executing a query. <b>IdleInTransactionAborted</b> - the backend is in a transaction, and not currently executing a query, but one of the statements in the transaction caused an error. <b>FastPathFunctionCall</b> - the backend is executing a fast-path function. <b>Disabled</b> - is reported if <a href="https://www.postgresql.org/docs/current/runtime-config-statistics.html#GUC-TRACK-ACTIVITIES" target="_blank"><i>track_activities</i></a> is disabled in this backend.</p>'
},
- 'postgres.db_stat_temp_files': {
- info: '<ul>' +
- '<li><strong>files:</strong> number of temporary files created by queries. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing).</li>' +
- '</ul>'
+ 'postgres.transactions_duration': {
+ info: 'Running transactions duration histogram. The bins are specified as consecutive, non-overlapping intervals. The value is the number of observed transactions that fall into each interval.'
},
- 'postgres.archive_wal': {
- info: 'WAL archiving.<ul>' +
- '<li><strong>total:</strong> total files.</li>' +
- '<li><strong>ready:</strong> WAL waiting to be archived.</li>' +
- '<li><strong>done:</strong> WAL successfully archived. ' +
- 'Ready WAL can indicate archive_command is in error, see <a href="https://www.postgresql.org/docs/current/static/continuous-archiving.html" target="_blank">Continuous Archiving and Point-in-Time Recovery</a>.</li>' +
- '</ul>'
+ 'postgres.queries_duration': {
+ info: 'Active queries duration histogram. The bins are specified as consecutive, non-overlapping intervals. The value is the number of observed active queries that fall into each interval.'
},
- 'postgres.checkpointer': {
- info: 'Number of checkpoints.<ul>' +
- '<li><strong>scheduled:</strong> when checkpoint_timeout is reached.</li>' +
- '<li><strong>requested:</strong> when max_wal_size is reached.</li>' +
- '</ul>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/static/wal-configuration.html" target="_blank">WAL Configuration</a>.'
- },
- 'postgres.autovacuum': {
- info: 'PostgreSQL databases require periodic maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum daemon. ' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/static/routine-vacuuming.html#AUTOVACUUM" target="_blank">The Autovacuum Daemon</a>.'
- },
- 'postgres.standby_delta': {
- info: 'Streaming replication delta.<ul>' +
- '<li><strong>sent_delta:</strong> replication delta sent to standby.</li>' +
- '<li><strong>write_delta:</strong> replication delta written to disk by this standby.</li>' +
- '<li><strong>flush_delta:</strong> replication delta flushed to disk by this standby server.</li>' +
- '<li><strong>replay_delta:</strong> replication delta replayed into the database on this standby server.</li>' +
- '</ul>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/static/warm-standby.html#SYNCHRONOUS-REPLICATION" target="_blank">Synchronous Replication</a>.'
- },
- 'postgres.replication_slot': {
- info: 'Replication slot files.<ul>' +
- '<li><strong>wal_keeped:</strong> WAL files retained by each replication slots.</li>' +
- '<li><strong>pg_replslot_files:</strong> files present in pg_replslot.</li>' +
- '</ul>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS" target="_blank">Replication Slots</a>.'
- },
- 'postgres.backend_usage': {
- info: 'Connections usage against maximum connections allowed, as defined in the <i>max_connections</i> setting.<ul>' +
- '<li><strong>available:</strong> maximum new connections allowed.</li>' +
- '<li><strong>used:</strong> connections currently in use.</li>' +
- '</ul>' +
- 'Assuming non-superuser accounts are being used to connect to Postgres (so <i>superuser_reserved_connections</i> are subtracted from <i>max_connections</i>).<br/>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/runtime-config-connection.html" target="_blank">Connections and Authentication</a>.'
- },
- 'postgres.forced_autovacuum': {
- info: 'Percent towards forced autovacuum for one or more tables.<ul>' +
- '<li><strong>percent_towards_forced_autovacuum:</strong> a forced autovacuum will run once this value reaches 100.</li>' +
- '</ul>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/routine-vacuuming.html" target="_blank">Preventing Transaction ID Wraparound Failures</a>.'
- },
- 'postgres.tx_wraparound_oldest_current_xid': {
- info: 'The oldest current transaction id (xid).<ul>' +
- '<li><strong>oldest_current_xid:</strong> oldest current transaction id.</li>' +
- '</ul>' +
- 'If for some reason autovacuum fails to clear old XIDs from a table, the system will begin to emit warning messages when the database\'s oldest XIDs reach eleven million transactions from the wraparound point.<br/>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/routine-vacuuming.html" target="_blank">Preventing Transaction ID Wraparound Failures</a>.'
- },
- 'postgres.percent_towards_wraparound': {
- info: 'Percent towards transaction wraparound.<ul>' +
- '<li><strong>percent_towards_wraparound:</strong> transaction wraparound may occur when this value reaches 100.</li>' +
- '</ul>' +
- 'For more information see <a href="https://www.postgresql.org/docs/current/routine-vacuuming.html" target="_blank">Preventing Transaction ID Wraparound Failures</a>.'
- },
- // python version end
- 'postgres.connections_utilization': {
- info: 'Connections in use as percentage of <i>max_connections</i>. Connection "slots" that are reserved for superusers (<i>superuser_reserved_connections</i>) are subtracted from the limit. If the utilization is 100% new connections will be accepted only for superusers, and no new replication connections will be accepted.'
+ 'postgres.checkpoints_rate': {
+ info: '<p>Number of checkpoints that have been performed. Checkpoints are periodic maintenance operations the database performs to make sure that everything it\'s been caching in memory has been synchronized with the disk. Ideally checkpoints should be time-driven (scheduled) as opposed to load-driven (requested).</p><p><b>Scheduled</b> - checkpoints triggered as per schedule when time elapsed from the previous checkpoint is greater than <a href="https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-CHECKPOINT-TIMEOUT" target="_blank"><i>checkpoint_timeout</i></a>. <b>Requested</b> - checkpoints triggered due to WAL updates reaching the <a href="https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-MAX-WAL-SIZE" target="_blank"><i>max_wal_size</i></a> before the <i>checkpoint_timeout</i> is reached.</p>'
},
- 'postgres.connections_usage': {
- info: '<p>Connections usage. The maximum number of concurrent connections to the database server is <i>max_connections</i> minus <i>superuser_reserved_connections</i>.</p><p><b>Available</b> - new connections allowed. <b>Used</b> - connections currently in use.</p>'
+ 'postgres.checkpoints_time': {
+ info: '<p>Checkpoint timing information. An important indicator of how well checkpoint I/O is performing is the amount of time taken to sync files to disk.</p><p><b>Write</b> - amount of time spent writing files to disk during checkpoint processing. <b>Sync</b> - amount of time spent synchronizing files to disk during checkpoint processing.</p>'
},
- 'postgres.checkpoints': {
- info: '<p>Number of checkpoints that have been performed. Checkpoints are periodic maintenance operations the database performs to make sure that everything it’s been caching in memory has been synchronized with the disk. It’s desirable when checkpoints are scheduled rather than requested, as the latter can indicate that your databases are under heavy load.</p><p><b>Scheduled</b> - checkpoints triggered due that the time elapsed from the previous checkpoint is more than pg setting <i>checkpoint_timeout</i>. <b>Requested</b> - checkpoints ran due to uncheckpointed WAL size grew to more than <i>max_wal_size</i> setting.</p>'
+ 'postgres.buffers_allocated_rate': {
+ info: 'Allocated and re-allocated buffers. If a backend process requests data it is either found in a block in shared buffer cache or the block has to be allocated (read from disk). The latter is counted as <b>Allocated</b>.'
},
- 'postgres.checkpoint_time': {
- info: '<p>Checkpoint timing information.</p><p><b>Write</b> - amount of time that has been spent in the portion of checkpoint processing where files are written to disk. <b>Sync</b> - amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk.</p>'
+ 'postgres.buffers_io_rate': {
+ info: '<p>Amount of data flushed from memory to disk.</p><p><b>Checkpoint</b> - buffers written during checkpoints. <b>Backend</b> - buffers written directly by a backend. It may happen that a dirty page is requested by a backend process. In this case the page is synced to disk before the page is returned to the client. <b>BgWriter</b> - buffers written by the background writer. PostgreSQL may clear pages with a low usage count in advance. The process scans for dirty pages with a low usage count so that they could be cleared if necessary. Buffers written by this process increment the counter.</p>'
},
- 'postgres.bgwriter_buffers_alloc': {
- info: 'Allocated and re-allocated buffers. If a backend process requests data it is either found in a block in shared buffer cache or the block has to be allocated (read from disk). The latter is counted as <b>Allocated</b>.'
+ 'postgres.bgwriter_halts_rate': {
+ info: 'Number of times the background writer stopped a cleaning scan because it had written too many buffers (exceeding the value of <a href="https://www.postgresql.org/docs/current/runtime-config-resource.html#RUNTIME-CONFIG-RESOURCE-BACKGROUND-WRITER" target="_blank"><i>bgwriter_lru_maxpages</i></a>).'
},
- 'postgres.bgwriter_buffers_written': {
- info: '<p>Amount of data flushed from memory to disk.</p><p><b>Checkpoint</b> - buffers written during checkpoints. <b>Backend</b> - buffers written directly by a backend. It may happen that a dirty page is requested by a backend process. In this case the page is synched to disk before the page is returned to the client. <b>Clean</b> - buffers written by the background writer. PostgreSQL may clear pages with a low usage count in advance. The process scans for dirty pages with a low usage count so that they could be cleared if necessay. Buffers written by this process increment the counter.</p>'
+ 'postgres.buffers_backend_fsync_rate': {
+ info: 'Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write). Any values above zero can indicate problems with storage when fsync queue is completely filled.'
},
- 'postgres.bgwriter_maxwritten_clean': {
- info: 'Number of times the background writer stopped a cleaning scan because it had written too many buffers (exceeding the value of <i>bgwriter_lru_maxpages</i>).'
+ 'postgres.wal_io_rate': {
+ info: 'Write-Ahead Logging (WAL) ensures data integrity by ensuring that changes to data files (where tables and indexes reside) are written only after log records describing the changes have been flushed to permanent storage.'
},
- 'postgres.bgwriter_buffers_backend_fsync': {
- info: 'Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write). Any values above zero can indicate problems with storage when fsync queue is completely filled. '
+ 'postgres.wal_files_count': {
+ info: '<p>Number of WAL logs stored in the directory <i>pg_wal</i> under the data directory.</p><p><b>Written</b> - generated log segments files. <b>Recycled</b> - old log segment files that are no longer needed. Renamed to become future segments in the numbered sequence to avoid the need to create new ones.</p>'
},
- 'postgres.wal_archive_files': {
- info: '<p>WAL archiving.</p><p><b>Ready</b> - WAL files waiting to be archived. A non-zero value can indicate <i>archive_command</i> is in error, see <a href="https://www.postgresql.org/docs/current/static/continuous-archiving.html" target="_blank">Continuous Archiving and Point-in-Time Recovery</a> <b>Done</b> - WAL files successfully archived.'
+ 'postgres.wal_archiving_files_count': {
+ info: '<p>WAL archiving.</p><p><b>Ready</b> - WAL files waiting to be archived. A non-zero value can indicate <i>archive_command</i> is in error, see <a href="https://www.postgresql.org/docs/current/static/continuous-archiving.html" target="_blank">Continuous Archiving and Point-in-Time Recovery</a>. <b>Done</b> - WAL files successfully archived.'
},
- 'postgres.autovacuum_workers': {
+ 'postgres.autovacuum_workers_count': {
info: 'PostgreSQL databases require periodic maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum daemon. For more information see <a href="https://www.postgresql.org/docs/current/static/routine-vacuuming.html#AUTOVACUUM" target="_blank">The Autovacuum Daemon</a>.'
},
- 'postgres.percent_towards_emergency_autovacuum': {
+ 'postgres.txid_exhaustion_towards_autovacuum_perc': {
info: 'Percentage towards emergency autovacuum for one or more tables. A forced autovacuum will run once this value reaches 100%. For more information see <a href="https://www.postgresql.org/docs/current/routine-vacuuming.html#VACUUM-FOR-WRAPAROUND" target="_blank">Preventing Transaction ID Wraparound Failures</a>.'
},
- 'postgres.percent_towards_txid_wraparound': {
+ 'postgres.txid_exhaustion_perc': {
info: 'Percentage towards transaction wraparound. A transaction wraparound may occur when this value reaches 100%. For more information see <a href="https://www.postgresql.org/docs/current/routine-vacuuming.html#VACUUM-FOR-WRAPAROUND" target="_blank">Preventing Transaction ID Wraparound Failures</a>.'
},
- 'postgres.oldest_transaction_xid': {
+ 'postgres.txid_exhaustion_oldest_txid_num': {
info: 'The oldest current transaction ID (XID). If for some reason autovacuum fails to clear old XIDs from a table, the system will begin to emit warning messages when the database\'s oldest XIDs reach eleven million transactions from the wraparound point. For more information see <a href="https://www.postgresql.org/docs/current/routine-vacuuming.html#VACUUM-FOR-WRAPAROUND" target="_blank">Preventing Transaction ID Wraparound Failures</a>.'
},
'postgres.uptime': {
+ room: {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="Uptime"'
+ + ' data-units="Seconds"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="10%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ],
+ },
info: 'The time elapsed since the Postgres process was started.'
},
- 'postgres.replication_standby_app_wal_delta': {
- info: '<p>Replication WAL delta.</p><p><b>SentDelta</b> - sent over the network. <b>WriteDelta</b> - written to disk. <b>FlushDelta</b> - flushed to disk. <b>ReplayDelta</b> - replayed into the database.</p>'
+ 'postgres.replication_app_wal_lag_size': {
+ info: '<p>Replication WAL lag size.</p><p><b>SentLag</b> - sent over the network. <b>WriteLag</b> - written to disk. <b>FlushLag</b> - flushed to disk. <b>ReplayLag</b> - replayed into the database.</p>'
},
- 'postgres.replication_standby_app_wal_lag': {
- info: '<p>Replication WAL lag.</p><p><b>WriteLag</b> - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written it, but not yet flushed it or applied it. <b>FlushLag</b> - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written and flushed it, but not yet applied it. <b>ReplayLag</b> - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written, flushed and applied it.</p>'
+ 'postgres.replication_app_wal_lag_time': {
+ info: '<p>Replication WAL lag time.</p><p><b>WriteLag</b> - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written it, but not yet flushed it or applied it. <b>FlushLag</b> - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written and flushed it, but not yet applied it. <b>ReplayLag</b> - time elapsed between flushing recent WAL locally and receiving notification that the standby server has written, flushed and applied it.</p>'
},
- 'postgres.replication_slot_files': {
+ 'postgres.replication_slot_files_count': {
info: '<p>Replication slot files. For more information see <a href="https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS" target="_blank">Replication Slots</a>.</p><p><b>WalKeep</b> - WAL files retained by the replication slot. <b>PgReplslotFiles</b> - files present in pg_replslot.</p>'
},
'postgres.db_transactions_ratio': {
- info: 'Percentage of commited/rollback transactions.'
+ info: 'Percentage of committed/rollback transactions.'
},
- 'postgres.db_transactions': {
- info: '<p>Number of transactions that have been performed</p><p><b>Commited</b> - transactions that have been committed. All changes made by the committed transaction become visible to others and are guaranteed to be durable if a crash occurs. <b>Rollback</b> - transactions that have been rolled back. Rollback aborts the current transaction and causes all the updates made by the transaction to be discarded. Single queries that have failed outside the transactions are also accounted as rollbacks.</p>'
+ 'postgres.db_transactions_rate': {
+ info: '<p>Number of transactions that have been performed</p><p><b>Committed</b> - transactions that have been committed. All changes made by the committed transaction become visible to others and are guaranteed to be durable if a crash occurs. <b>Rollback</b> - transactions that have been rolled back. Rollback aborts the current transaction and causes all the updates made by the transaction to be discarded. Single queries that have failed outside the transactions are also accounted as rollbacks.</p>'
},
'postgres.db_connections_utilization': {
- info: 'Connections in use as percentage of the database\'s <i>CONNECTION LIMIT</i> (if set) or <i>max_connections</i>.'
+ info: 'Connection utilization per database. Utilization is measured as a percentage of <i>CONNECTION LIMIT</i> per database (if set) or <i>max_connections</i> (if <i>CONNECTION LIMIT</i> is not set).'
},
- 'postgres.db_connections': {
- info: 'Number of backends currently connected to this database.'
+ 'postgres.db_connections_count': {
+ info: 'Number of current connections per database.'
},
- 'postgres.db_buffer_cache_hit_ratio': {
- info: 'Buffer cache hit ratio. When clients request data, postgres checks shared memory and if there are no relevant data there it has to read it from disk, thus queries become slower.'
+ 'postgres.db_cache_io_ratio': {
+ room: {
+ mainheads: [
+ function (_, id) {
+ return '<div data-netdata="' + id + '"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Cache Miss Ratio"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' role="application"></div>';
+ }
+ ],
+ },
+ info: 'PostgreSQL uses a <b>shared buffer cache</b> to store frequently accessed data in memory, and avoid slower disk reads. If you are seeing performance issues, consider increasing the <a href="https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS" target="_blank"><i>shared_buffers</i></a> size or tuning <a href="https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE" target="_blank"><i>effective_cache_size</i></a>.'
},
- 'postgres.db_blocks_read': {
- info: '<p>Number of blocks read from shared buffer cache or from disk.</p><p><b>disk</b> - number of disk blocks read. <b>memory</b> - number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache).</p>'
+ 'postgres.db_io_rate': {
+ info: '<p>Amount of data read from shared buffer cache or from disk.</p><p><b>Disk</b> - data read from disk. <b>Memory</b> - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache).</p>'
},
- 'postgres.db_rows_read_ratio': {
- info: 'Percentage of returned/fetched rows.'
+ 'postgres.db_ops_fetched_rows_ratio': {
+ room: {
+ mainheads: [
+ function (_, id) {
+ return '<div data-netdata="' + id + '"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Rows Fetched vs Returned"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' role="application"></div>';
+ }
+ ],
+ },
+ info: 'The percentage of rows that contain data needed to execute the query, out of the total number of rows scanned. A high value indicates that the database is executing queries efficiently, while a low value indicates that the database is performing extra work by scanning a large number of rows that aren\'t required to process the query. Low values may be caused by missing indexes or inefficient queries.'
},
- 'postgres.db_rows_read': {
- info: '<p>Read queries throughput.</p><p><b>Returned</b> - number of rows returned by queries. The value keeps track of the number of rows read/scanned, not the rows actually returned to the client. <b>Fetched</b> - number of rows fetched that contained data necessary to execute the query successfully.</p>'
+ 'postgres.db_ops_read_rows_rate': {
+ info: '<p>Read queries throughput.</p><p><b>Returned</b> - Total number of rows scanned by queries. This value indicates rows returned by the storage layer to be scanned, not rows returned to the client. <b>Fetched</b> - Subset of scanned rows (<b>Returned</b>) that contained data needed to execute the query.</p>'
},
- 'postgres.db_rows_written': {
+ 'postgres.db_ops_write_rows_rate': {
info: '<p>Write queries throughput.</p><p><b>Inserted</b> - number of rows inserted by queries. <b>Deleted</b> - number of rows deleted by queries. <b>Updated</b> - number of rows updated by queries.</p>'
},
- 'postgres.db_conflicts': {
- info: 'Number of queries canceled due to conflicts with recovery. Conflicts occur only on standby servers.'
+ 'postgres.db_conflicts_rate': {
+ info: 'Number of queries canceled due to conflict with recovery on standby servers. To minimize query cancels caused by cleanup records consider configuring <a href="https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-HOT-STANDBY-FEEDBACK" target="_blank"><i>hot_standby_feedback</i></a>.'
},
- 'postgres.db_conflicts_stat': {
- info: '<p>Number of queries canceled due to conflicts with recovery.</p><p><b>Tablespace</b> - queries that have been canceled due to dropped tablespaces. <b>Lock</b> - queries that have been canceled due to lock timeouts. <b>Snapshot</b> - queries that have been canceled due to old snapshots. <b>Bufferpin</b> - queries that have been canceled due to pinned buffers. <b>Deadlock</b> - queries that have been canceled due to deadlocks.</p>'
+ 'postgres.db_conflicts_reason_rate': {
+ info: '<p>Statistics about queries canceled due to various types of conflicts on standby servers.</p><p><b>Tablespace</b> - queries that have been canceled due to dropped tablespaces. <b>Lock</b> - queries that have been canceled due to lock timeouts. <b>Snapshot</b> - queries that have been canceled due to old snapshots. <b>Bufferpin</b> - queries that have been canceled due to pinned buffers. <b>Deadlock</b> - queries that have been canceled due to deadlocks.</p>'
},
- 'postgres.db_deadlocks': {
+ 'postgres.db_deadlocks_rate': {
info: 'Number of detected deadlocks. When a transaction cannot acquire the requested lock within a certain amount of time (configured by <b>deadlock_timeout</b>), it begins deadlock detection.'
},
- 'postgres.db_locks_held': {
+ 'postgres.db_locks_held_count': {
info: 'Number of held locks. Some of these lock modes are acquired by PostgreSQL automatically before statement execution, while others are provided to be used by applications. All lock modes acquired in a transaction are held for the duration of the transaction. For lock modes details, see <a href="https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES" target="_blank">table-level locks</a>.'
},
- 'postgres.db_locks_awaited': {
+ 'postgres.db_locks_awaited_count': {
info: 'Number of awaited locks. It indicates that some transaction is currently waiting to acquire a lock, which implies that some other transaction is holding a conflicting lock mode on the same lockable object. For lock modes details, see <a href="https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES" target="_blank">table-level locks</a>.'
},
- 'postgres.db_temp_files': {
+ 'postgres.db_temp_files_created_rate': {
info: 'Number of temporary files created by queries. Complex queries may require more memory than is available (specified by <b>work_mem</b>). When this happens, Postgres reverts to using temporary files - they are actually stored on disk, but only exist for the duration of the request. After the request returns, the temporary files are deleted.'
},
- 'postgres.db_temp_files_data': {
+ 'postgres.db_temp_files_io_rate': {
info: 'Amount of data written temporarily to disk to execute queries.'
},
'postgres.db_size': {
+ room: {
+ mainheads: [
+ function (os, id) {
+ void (os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-chart-library="easypiechart"'
+ + ' data-title="DB Size"'
+ + ' data-units="MiB"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="10%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ],
+ },
info: 'Actual on-disk usage of the database\'s data directory and any associated tablespaces.'
},
+ 'postgres.table_rows_dead_ratio': {
+ info: 'Percentage of dead rows. An increase in dead rows indicates a problem with VACUUM processes, which can slow down your queries.'
+ },
+ 'postgres.table_rows_count': {
+ info: '<p>Number of rows. When you do an UPDATE or DELETE, the row is not actually physically deleted. For a DELETE, the database simply marks the row as unavailable for future transactions, and for UPDATE, under the hood it is a combined INSERT then DELETE, where the previous version of the row is marked unavailable.</p><p><b>Live</b> - rows that are currently in use and can be queried. <b>Dead</b> - deleted rows that will later be reused for new rows from INSERT or UPDATE.</p>'
+ },
+ 'postgres.table_ops_rows_rate': {
+ info: 'Write queries throughput. If you see a large number of updated and deleted rows, keep an eye on the number of dead rows, as a high percentage of dead rows can slow down your queries.'
+ },
+ 'postgres.table_ops_rows_hot_ratio': {
+ info: 'Percentage of HOT (Heap Only Tuple) updated rows. HOT updates are much more efficient than ordinary updates: less write operations, less WAL writes, vacuum operation has less work to do, increased read efficiency (help to limit table and index bloat).'
+ },
+ 'postgres.table_ops_rows_hot_rate': {
+ info: 'Number of HOT (Heap Only Tuple) updated rows.'
+ },
+ 'postgres.table_cache_io_ratio': {
+ info: 'Table cache inefficiency. Percentage of data read from disk. Lower is better.'
+ },
+ 'postgres.table_io_rate': {
+ info: '<p>Amount of data read from shared buffer cache or from disk.</p><p><b>Disk</b> - data read from disk. <b>Memory</b> - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache).</p>'
+ },
+ 'postgres.table_index_cache_io_ratio': {
+ info: 'Table indexes cache inefficiency. Percentage of data read from disk. Lower is better.'
+ },
+ 'postgres.table_index_io_rate': {
+ info: '<p>Amount of data read from all indexes from shared buffer cache or from disk.</p><p><b>Disk</b> - data read from disk. <b>Memory</b> - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache).</p>'
+ },
+ 'postgres.table_toast_cache_io_ratio': {
+ info: 'Table TOAST cache inefficiency. Percentage of data read from disk. Lower is better.'
+ },
+ 'postgres.table_toast_io_rate': {
+ info: '<p>Amount of data read from TOAST table from shared buffer cache or from disk.</p><p><b>Disk</b> - data read from disk. <b>Memory</b> - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache).</p>'
+ },
+ 'postgres.table_toast_index_cache_io_ratio': {
+ info: 'Table TOAST indexes cache inefficiency. Percentage of data read from disk. Lower is better.'
+ },
+ 'postgres.table_toast_index_io_rate': {
+ info: '<p>Amount of data read from this table\'s TOAST table indexes from shared buffer cache or from disk.</p><p><b>Disk</b> - data read from disk. <b>Memory</b> - data read from buffer cache (this only includes hits in the PostgreSQL buffer cache, not the operating system\'s file system cache).</p>'
+ },
+ 'postgres.table_scans_rate': {
+ info: '<p>Number of scans initiated on this table. If you see that your database regularly performs more sequential scans over time, you can improve its performance by creating an index on data that is frequently accessed.</p><p><b>Index</b> - relying on an index to point to the location of specific rows. <b>Sequential</b> - have to scan through each row of a table sequentially. Typically, take longer than index scans.</p>'
+ },
+ 'postgres.table_scans_rows_rate': {
+ info: 'Number of live rows fetched by scans.'
+ },
+ 'postgres.table_autovacuum_since_time': {
+ info: 'Time elapsed since this table was vacuumed by the autovacuum daemon.'
+ },
+ 'postgres.table_vacuum_since_time': {
+ info: 'Time elapsed since this table was manually vacuumed (not counting VACUUM FULL).'
+ },
+ 'postgres.table_autoanalyze_since_time': {
+ info: 'Time elapsed since this table was analyzed by the autovacuum daemon.'
+ },
+ 'postgres.table_analyze_since_time': {
+ info: 'Time elapsed since this table was manually analyzed.'
+ },
+ 'postgres.table_null_columns': {
+ info: 'Number of table columns that contain only NULLs.'
+ },
+ 'postgres.table_total_size': {
+ info: 'Actual on-disk size of the table.'
+ },
+ 'postgres.table_bloat_size_perc': {
+ info: 'Estimated percentage of bloat in the table. It is normal for tables that are updated frequently to have a small to moderate amount of bloat.'
+ },
+ 'postgres.table_bloat_size': {
+ info: 'Disk space that was used by the table and is available for reuse by the database but has not been reclaimed. Bloated tables require more disk storage and additional I/O that can slow down query execution. Running <a href="https://www.postgresql.org/docs/current/sql-vacuum.html" target="_blank">VACUUM</a> regularly on a table that is updated frequently results in fast reuse of space occupied by expired rows, which prevents the table from growing too large.'
+ },
+ 'postgres.index_size': {
+ info: 'Actual on-disk size of the index.'
+ },
+ 'postgres.index_bloat_size_perc': {
+ info: 'Estimated percentage of bloat in the index.'
+ },
+ 'postgres.index_bloat_size': {
+ info: 'Disk space that was used by the index and is available for reuse by the database but has not been reclaimed. Bloat slows down your database and eats up more storage than needed. To recover the space from indexes, recreate them using the <a href="https://www.postgresql.org/docs/current/sql-reindex.html" target="_blank">REINDEX</a> command.'
+ },
+ 'postgres.index_usage_status': {
+ info: 'An index is considered unused if no scans have been initiated on that index.'
+ },
+
// ------------------------------------------------------------------------
// PgBouncer
@@ -4034,6 +4173,114 @@ netdataDashboard.context = {
},
// ------------------------------------------------------------------------
+ // CASSANDRA
+
+ 'cassandra.client_requests_rate': {
+ info: 'Client requests received per second. Consider whether your workload is read-heavy or write-heavy while choosing a compaction strategy.'
+ },
+ 'cassandra.client_requests_latency': {
+ info: 'Response latency of client requests. Latency could be impacted by disk access, network latency or replication configuration.'
+ },
+ 'cassandra.key_cache_hit_ratio': {
+ info: 'Key cache hit ratio indicates the efficiency of the key cache. If ratio is consistently < 80% consider increasing cache size.'
+ },
+ 'cassandra.key_cache_hit_rate': {
+ info: 'Key cache hit rate measures the cache hits and misses per second.'
+ },
+ 'cassandra.storage_live_disk_space_used': {
+ info: 'Amount of live disk space used. This does not include obsolete data waiting to be garbage collected.'
+ },
+ 'cassandra.compaction_completed_tasks_rate': {
+ info: 'Compaction tasks completed per second.'
+ },
+ 'cassandra.compaction_pending_tasks_count': {
+ info: 'Total compaction tasks in queue.'
+ },
+ 'cassandra.thread_pool_active_tasks_count': {
+ info: 'Total tasks currently being processed.'
+ },
+ 'cassandra.thread_pool_pending_tasks_count': {
+ info: 'Total tasks in queue awaiting a thread for processing.'
+ },
+ 'cassandra.thread_pool_blocked_tasks_rate': {
+ info: 'Tasks that cannot be queued for processing yet.'
+ },
+ 'cassandra.thread_pool_blocked_tasks_count': {
+ info: 'Total tasks that cannot yet be queued for processing.'
+ },
+ 'cassandra.jvm_gc_rate': {
+ info: '<p>Rate of garbage collections.</p><p><b>ParNew</b> - young-generation. <b>cms (ConcurrentMarkSweep)</b> - old-generation.</p>'
+ },
+ 'cassandra.jvm_gc_time': {
+ info: '<p>Elapsed time of garbage collection.</p><p><b>ParNew</b> - young-generation. <b>cms (ConcurrentMarkSweep)</b> - old-generation.</p>'
+ },
+ 'cassandra.client_requests_timeouts_rate': {
+ info: 'Requests which were not acknowledged within the configurable timeout window.'
+ },
+ 'cassandra.client_requests_unavailables_rate': {
+ info: 'Requests for which the required number of nodes was unavailable.'
+ },
+ 'cassandra.storage_exceptions_rate': {
+ info: 'Requests for which a storage exception was encountered.'
+ },
+
+ // ------------------------------------------------------------------------
+ // WMI (Process)
+
+ 'wmi.processes_cpu_time': {
+ info: 'Total CPU utilization. The amount of time spent by the process in <a href="https://en.wikipedia.org/wiki/CPU_modes#Mode_types" target="_blank">user and privileged</a> modes.'
+ },
+ 'wmi.processes_handles': {
+ info: 'Total number of <a href="https://learn.microsoft.com/en-us/windows/win32/sysinfo/handles-and-objects" target="_blank">handles</a> the process has open. This number is the sum of the handles currently open by each thread in the process.'
+ },
+ 'wmi.processes_io_bytes': {
+ info: 'Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations.'
+ },
+ 'wmi.processes_io_operations': {
+ info: 'I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations.'
+ },
+ 'wmi.processes_page_faults': {
+ info: 'Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared.'
+ },
+ 'wmi.processes_file_bytes': {
+ info: 'Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.'
+ },
+ 'wmi.processes_pool_bytes': {
+ info: 'Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used.'
+ },
+ 'wmi.processes_threads': {
+ info: 'Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.'
+ },
+
+ // ------------------------------------------------------------------------
+ // WMI (TCP)
+
+ 'wmi.tcp_conns_active': {
+ info: 'Number of times TCP connections have made a direct transition from the CLOSED state to the SYN-SENT state.'
+ },
+ 'wmi.tcp_conns_established': {
+ info: 'Number of TCP connections for which the current state is either ESTABLISHED or CLOSE-WAIT.'
+ },
+ 'wmi.tcp_conns_failures': {
+ info: 'Number of times TCP connections have made a direct transition to the CLOSED state from the SYN-SENT state or the SYN-RCVD state, plus the number of times TCP connections have made a direct transition from the SYN-RCVD state to the LISTEN state.'
+ },
+ 'wmi.tcp_conns_passive': {
+ info: 'Number of times TCP connections have made a direct transition from the LISTEN state to the SYN-RCVD state.'
+ },
+ 'wmi.tcp_conns_resets': {
+ info: 'Number of times TCP connections have made a direct transition to the CLOSED state from either the ESTABLISHED state or the CLOSE-WAIT state.'
+ },
+ 'wmi.tcp_segments_received': {
+ info: 'Rate at which segments are received, including those received in error. This count includes segments received on currently established connections.'
+ },
+ 'wmi.tcp_segments_retransmitted': {
+ info: 'Rate at which segments are retransmitted, that is, segments transmitted that contain one or more previously transmitted bytes.'
+ },
+ 'wmi.tcp_segments_sent': {
+ info: 'Rate at which segments are sent, including those on current connections, but excluding those containing only retransmitted bytes.'
+ },
+
+ // ------------------------------------------------------------------------
// APACHE
'apache.connections': {
@@ -5911,15 +6158,16 @@ netdataDashboard.context = {
},
'logind.sessions': {
- info: 'Shows the number of active sessions of each type tracked by logind.'
+ info: 'Local and remote sessions.'
},
-
- 'logind.users': {
- info: 'Shows the number of active users of each type tracked by logind.'
+ 'logind.sessions_type': {
+ info: '<p>Sessions of each session type.</p><p><b>Graphical</b> - sessions are running under one of X11, Mir, or Wayland. <b>Console</b> - sessions are usually regular text mode local logins, but depending on how the system is configured may have an associated GUI. <b>Other</b> - sessions are those that do not fall into the above categories (such as sessions for cron jobs or systemd timer units).</p>'
},
-
- 'logind.seats': {
- info: 'Shows the number of active seats tracked by logind. Each seat corresponds to a combination of a display device and input device providing a physical presence for the system.'
+ 'logind.sessions_state': {
+ info: '<p>Sessions in each session state.</p><p><b>Online</b> - logged in and running in the background. <b>Closing</b> - nominally logged out, but some processes belonging to it are still around. <b>Active</b> - logged in and running in the foreground.</p>'
+ },
+ 'logind.users_state': {
+ info: '<p>Users in each user state.</p><p><b>Offline</b> - users are not logged in. <b>Closing</b> - users are in the process of logging out without lingering. <b>Online</b> - users are logged in, but have no active sessions. <b>Lingering</b> - users are not logged in, but have one or more services still running. <b>Active</b> - users are logged in, and have at least one active session.</p>'
},
// ------------------------------------------------------------------------
@@ -7368,6 +7616,71 @@ netdataDashboard.context = {
'<a href="https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-terminated" target="_blank">More info.</a>'
},
+ // Ping
+
+ 'ping.host_rtt': {
+ info: 'Round-trip time (RTT) is the time it takes for a data packet to reach its destination and return back to its original source.'
+ },
+ 'ping.host_std_dev_rtt': {
+ info: 'Round-trip time (RTT) standard deviation. The average value of how far each RTT of a ping differs from the average RTT.'
+ },
+ 'ping.host_packet_loss': {
+ info: 'Packet loss occurs when one or more transmitted data packets do not reach their destination. Usually caused by data transfer errors, network congestion or firewall blocking. ICMP echo packets are often treated as lower priority by routers and target hosts, so ping test packet loss may not always translate to application packet loss.'
+ },
+ 'ping.host_packets': {
+ info: 'Number of ICMP messages sent and received. These counters should be equal if there is no packet loss.'
+ },
+
+ // NVMe
+
+ 'nvme.device_estimated_endurance_perc': {
+ info: 'NVM subsystem lifetime used based on the actual usage and the manufacturer\'s prediction of NVM life. A value of 100 indicates that the estimated endurance of the device has been consumed, but may not indicate a device failure. The value can be greater than 100 if you use the storage beyond its planned lifetime.'
+ },
+ 'nvme.device_available_spare_perc': {
+ info: 'Remaining spare capacity that is available. SSDs provide a set of internal spare capacity, called spare blocks, that can be used to replace blocks that have reached their write operation limit. After all of the spare blocks have been used, the next block that reaches its limit causes the disk to fail.'
+ },
+ 'nvme.device_composite_temperature': {
+ info: 'The current composite temperature of the controller and namespace(s) associated with that controller. The manner in which this value is computed is implementation specific and may not represent the actual temperature of any physical point in the NVM subsystem.'
+ },
+ 'nvme.device_io_transferred_count': {
+ info: 'The total amount of data read and written by the host.'
+ },
+ 'nvme.device_power_cycles_count': {
+ info: 'Power cycles reflect the number of times this host has been rebooted or the device has been woken up after sleep. A high number of power cycles does not affect the device\'s life expectancy.'
+ },
+ 'nvme.device_power_on_time': {
+ info: '<a href="https://en.wikipedia.org/wiki/Power-on_hours" target="_blank">Power-on time</a> is the length of time the device is supplied with power.'
+ },
+ 'nvme.device_unsafe_shutdowns_count': {
+ info: 'The number of times a power outage occurred without a shutdown notification being sent. Depending on the NVMe device you are using, an unsafe shutdown can corrupt user data.'
+ },
+ 'nvme.device_critical_warnings_state': {
+ info: '<p>Critical warnings for the status of the controller. Status active if set to 1.</p><p><b>AvailableSpare</b> - the available spare capacity is below the threshold. <b>TempThreshold</b> - the composite temperature is greater than or equal to an over temperature threshold or less than or equal to an under temperature threshold. <b>NvmSubsystemReliability</b> - the NVM subsystem reliability is degraded due to excessive media or internal errors. <b>ReadOnly</b> - media is placed in read-only mode. <b>VolatileMemBackupFailed</b> - the volatile memory backup device has failed. <b>PersistentMemoryReadOnly</b> - the Persistent Memory Region has become read-only or unreliable.</p>'
+ },
+ 'nvme.device_media_errors_rate': {
+ info: 'The number of occurrences where the controller detected an unrecovered data integrity error. Errors such as uncorrectable ECC, CRC checksum failure, or LBA tag mismatch are included in this counter.'
+ },
+ 'nvme.device_error_log_entries_rate': {
+ info: 'The number of entries in the Error Information Log. By itself, an increase in the number of records is not an indicator of any failure condition.'
+ },
+ 'nvme.device_warning_composite_temperature_time': {
+ info: 'The time the device has been operating above the Warning Composite Temperature Threshold (WCTEMP) and below Critical Composite Temperature Threshold (CCTEMP).'
+ },
+ 'nvme.device_critical_composite_temperature_time': {
+ info: 'The time the device has been operating above the Critical Composite Temperature Threshold (CCTEMP).'
+ },
+ 'nvme.device_thermal_mgmt_temp1_transitions_rate': {
+ info: 'The number of times the controller has entered lower active power states or performed vendor-specific thermal management actions, <b>minimizing performance impact</b>, to attempt to lower the Composite Temperature due to the host-managed thermal management feature.'
+ },
+ 'nvme.device_thermal_mgmt_temp2_transitions_rate': {
+ info: 'The number of times the controller has entered lower active power states or performed vendor-specific thermal management actions, <b>regardless of the impact on performance (e.g., heavy throttling)</b>, to attempt to lower the Composite Temperature due to the host-managed thermal management feature.'
+ },
+ 'nvme.device_thermal_mgmt_temp1_time': {
+ info: 'The amount of time the controller has entered lower active power states or performed vendor-specific thermal management actions, <b>minimizing performance impact</b>, to attempt to lower the Composite Temperature due to the host-managed thermal management feature.'
+ },
+ 'nvme.device_thermal_mgmt_temp2_time': {
+ info: 'The amount of time the controller has entered lower active power states or performed vendor-specific thermal management actions, <b>regardless of the impact on performance (e.g., heavy throttling)</b>, to attempt to lower the Composite Temperature due to the host-managed thermal management feature.'
+ },
// ------------------------------------------------------------------------
};