Diffstat (limited to 'web/gui/dashboard_info.js')
 web/gui/dashboard_info.js | 145
 1 file changed, 105 insertions(+), 40 deletions(-)
diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js
index d4357777..35834aaf 100644
--- a/web/gui/dashboard_info.js
+++ b/web/gui/dashboard_info.js
@@ -427,7 +427,7 @@ netdataDashboard.menu = {
'web_log': {
title: undefined,
icon: '<i class="fas fa-file-alt"></i>',
- info: 'Information extracted from a server log file. <code>web_log</code> plugin incrementally parses the server log file to provide, in real-time, a break down of key server performance metrics. For web servers, an extended log file format may optionally be used (for <code>nginx</code> and <code>apache</code>) offering timing information and bandwidth for both requests and responses. <code>web_log</code> plugin may also be configured to provide a break down of requests per URL pattern (check <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ info: 'Information extracted from a server log file. The <code>web_log</code> plugin incrementally parses the server log file to provide, in real time, a breakdown of key server performance metrics. For web servers, an extended log file format may optionally be used (for <code>nginx</code> and <code>apache</code>) offering timing information and bandwidth for both requests and responses. The <code>web_log</code> plugin may also be configured to provide a breakdown of requests per URL pattern (check <a href="https://github.com/netdata/go.d.plugin/blob/master/config/go.d/web_log.conf" target="_blank"><code>/etc/netdata/go.d/web_log.conf</code></a>).'
},
'squid': {
@@ -739,7 +739,7 @@ netdataDashboard.submenu = {
},
'web_log.urls': {
- info: 'Number of requests for each <code>URL pattern</code> defined in <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>. This chart counts all requests matching the URL patterns defined, independently of the web server response codes (i.e. both successful and unsuccessful).'
+ info: 'Number of requests for each <code>URL pattern</code> defined in <a href="https://github.com/netdata/go.d.plugin/blob/master/config/go.d/web_log.conf" target="_blank"><code>/etc/netdata/go.d/web_log.conf</code></a>. This chart counts all requests matching the URL patterns defined, independently of the web server response codes (i.e. both successful and unsuccessful).'
},
'web_log.clients': {
@@ -1309,29 +1309,61 @@ netdataDashboard.context = {
height: 0.7
},
- 'system.cpu_pressure': {
- info: '<a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a> ' +
- 'identifies and quantifies the disruptions caused by resource contentions. ' +
- 'The "some" line indicates the share of time in which at least <b>some</b> tasks are stalled on CPU. ' +
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ 'system.cpu_some_pressure': {
+ info: 'CPU <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
+ '<b>Some</b> indicates the share of time in which at least <b>some tasks</b> are stalled on CPU. ' +
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'system.cpu_some_pressure_stall_time': {
+ info: 'The amount of time some processes have been waiting for CPU time.'
+ },
+ 'system.cpu_full_pressure': {
+ info: 'CPU <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. ' +
+ '<b>Full</b> indicates the share of time in which <b>all non-idle tasks</b> are stalled on the CPU resource simultaneously. ' +
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'system.cpu_full_pressure_stall_time': {
+ info: 'The amount of time all non-idle processes have been stalled due to CPU congestion.'
},
'system.memory_some_pressure': {
- info: '<a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a> ' +
- 'identifies and quantifies the disruptions caused by resource contentions. ' +
- 'The "some" line indicates the share of time in which at least <b>some</b> tasks are stalled on memory. ' +
- 'The "full" line indicates the share of time in which <b>all non-idle</b> tasks are stalled on memory simultaneously. ' +
- 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. ' +
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ info: 'Memory <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
+ '<b>Some</b> indicates the share of time in which at least <b>some tasks</b> are stalled on memory. ' +
+ 'In this state the CPU is still doing productive work. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'system.memory_some_pressure_stall_time': {
+ info: 'The amount of time some processes have been waiting due to memory congestion.'
+ },
+ 'system.memory_full_pressure': {
+ info: 'Memory <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. ' +
+ '<b>Full</b> indicates the share of time in which <b>all non-idle tasks</b> are stalled on the memory resource simultaneously. ' +
+ 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. '+
+ 'This has a severe impact on performance. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'system.memory_full_pressure_stall_time': {
+ info: 'The amount of time all non-idle processes have been stalled due to memory congestion.'
},
'system.io_some_pressure': {
- info: '<a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a> ' +
- 'identifies and quantifies the disruptions caused by resource contentions. ' +
- 'The "some" line indicates the share of time in which at least <b>some</b> tasks are stalled on I/O. ' +
- 'The "full" line indicates the share of time in which <b>all non-idle</b> tasks are stalled on I/O simultaneously. ' +
- 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. ' +
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ info: 'I/O <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
+ '<b>Some</b> indicates the share of time in which at least <b>some tasks</b> are stalled on I/O. ' +
+ 'In this state the CPU is still doing productive work. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'system.io_some_pressure_stall_time': {
+ info: 'The amount of time some processes have been waiting due to I/O congestion.'
+ },
+ 'system.io_full_pressure': {
+ info: 'I/O <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. ' +
+ '<b>Full</b> indicates the share of time in which <b>all non-idle tasks</b> are stalled on the I/O resource simultaneously. ' +
+ 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. '+
+ 'This has a severe impact on performance. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'system.io_full_pressure_stall_time': {
+ info: 'The amount of time all non-idle processes have been stalled due to I/O congestion.'
},
'system.io': {
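
The pressure charts in the hunk above surface the kernel's PSI interface directly. As a point of reference only (this is not part of the patch and not Netdata's collector code), here is a minimal Node.js sketch of the /proc/pressure/* line format behind these charts, assuming the layout documented in the linked kernel page:

```js
// Illustrative sketch only -- not part of dashboard_info.js or Netdata's collectors.
// /proc/pressure/{cpu,memory,io} each contain one or two lines such as:
//   some avg10=1.53 avg60=0.87 avg300=0.42 total=157656722
//   full avg10=0.00 avg60=0.01 avg300=0.00 total=10056133
// avg10/avg60/avg300 are the recent-trend percentages over 10-, 60- and
// 300-second windows; total is the cumulative stall time in microseconds.
'use strict';
const fs = require('fs');

function parsePressure(path) {
    const result = {};
    for (const line of fs.readFileSync(path, 'utf8').trim().split('\n')) {
        const [kind, ...fields] = line.split(/\s+/); // "some" or "full"
        result[kind] = {};
        for (const field of fields) {
            const [key, value] = field.split('=');
            result[kind][key] = Number(value);
        }
    }
    return result;
}

// Example on a Linux 4.20+ kernel built with CONFIG_PSI:
// const mem = parsePressure('/proc/pressure/memory');
// console.log(mem.some.avg10, mem.full.total);
```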
@@ -4026,14 +4058,33 @@ netdataDashboard.context = {
'When an application has used its allotted CPU quota for a given period, it gets throttled until the next period.'
},
+ 'cgroup.cpu_shares': {
+ info: '<p>The weight of each cgroup in the same hierarchy, which translates into the share of CPU time it is expected to get. '+
+ 'The percentage of CPU assigned to the cgroup is its shares value divided by the sum of the shares of all cgroups at the same level.</p>'+
+ '<p>For example, tasks in two cgroups that have <b>cpu.shares</b> set to 100 will receive equal CPU time, '+
+ 'but tasks in a cgroup that has <b>cpu.shares</b> set to 200 receive twice the CPU time of tasks in a cgroup where <b>cpu.shares</b> is set to 100.</p>'
+ },
+
'cgroup.cpu_per_core': {
info: 'Total CPU utilization per core within the system-wide CPU resources.'
},
- 'cgroup.cpu_pressure': {
+ 'cgroup.cpu_some_pressure': {
info: 'CPU <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
- '<b>Some</b> indicates the share of time in which at least some tasks are stalled on CPU. '+
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ '<b>Some</b> indicates the share of time in which at least <b>some tasks</b> are stalled on CPU. ' +
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'cgroup.cpu_some_pressure_stall_time': {
+ info: 'The amount of time some processes have been waiting for CPU time.'
+ },
+
+ 'cgroup.cpu_full_pressure': {
+ info: 'CPU <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. ' +
+ '<b>Full</b> indicates the share of time in which <b>all non-idle tasks</b> are stalled on the CPU resource simultaneously. ' +
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'cgroup.cpu_full_pressure_stall_time': {
+ info: 'The amount of time all non-idle processes have been stalled due to CPU congestion.'
},
'cgroup.mem_utilization': {
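
The cpu.shares text added above describes a plain ratio. A tiny illustrative sketch of that arithmetic (the helper name and share values are invented for the example, reproducing the 100-vs-200 case from the description):

```js
// Illustrative sketch only -- not part of dashboard_info.js.
// Expected CPU percentage of a cgroup = its cpu.shares divided by the sum of
// the cpu.shares of all cgroups at the same level of the hierarchy.
function expectedCpuPercent(ownShares, sharesAtSameLevel) {
    const total = sharesAtSameLevel.reduce((sum, s) => sum + s, 0);
    return 100 * ownShares / total;
}

// Two siblings with 100 shares each split the CPU evenly ...
console.log(expectedCpuPercent(100, [100, 100]));      // 50
// ... while a sibling with 200 shares gets twice as much as one with 100.
console.log(expectedCpuPercent(200, [200, 100, 100])); // 50
console.log(expectedCpuPercent(100, [200, 100, 100])); // 25
```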
@@ -4119,18 +4170,25 @@ netdataDashboard.context = {
'<b>Swap</b> - major page faults.</p>'
},
- 'cgroup.memory_pressure': {
+ 'cgroup.memory_some_pressure': {
info: 'Memory <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
- '<b>Some</b> indicates the share of time in which at least some tasks are stalled on memory. '+
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ '<b>Some</b> indicates the share of time in which at least <b>some tasks</b> are stalled on memory. ' +
+ 'In this state the CPU is still doing productive work. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'cgroup.memory_some_pressure_stall_time': {
+ info: 'The amount of time some processes have been waiting due to memory congestion.'
},
'cgroup.memory_full_pressure': {
- info: 'Memory <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
- '<b>Full</b> indicates the share of time in which all non-idle tasks are stalled on memory simultaneously. '+
- 'In this state actual CPU cycles are going to waste, '+
- 'and a workload that spends extended time in this state is considered to be thrashing. '+
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ info: 'Memory <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. ' +
+ '<b>Full</b> indicates the share of time in which <b>all non-idle tasks</b> are stalled on the memory resource simultaneously. ' +
+ 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. '+
+ 'This has a severe impact on performance. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'cgroup.memory_full_pressure_stall_time': {
+ info: 'The amount of time all non-idle processes have been stalled due to memory congestion.'
},
'cgroup.io': {
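
The new *_pressure_stall_time contexts in this and the surrounding hunks come from the cumulative total= field of the same PSI interface, which cgroup v2 exposes per cgroup in the cpu.pressure, memory.pressure and io.pressure files. A minimal sketch of how per-interval stall time falls out of that counter (the sample values are invented; this is not Netdata's collector code):

```js
// Illustrative sketch only -- not Netdata's collector code.
// The PSI "total" field is a monotonically increasing counter of stall time
// in microseconds, so the stall time accumulated during one collection
// interval is the difference between two consecutive readings.
function stallTimeMs(prevTotalUs, currTotalUs) {
    return (currTotalUs - prevTotalUs) / 1000; // microseconds -> milliseconds
}

// e.g. two readings of the "full" line of <cgroup>/memory.pressure,
// taken one second apart:
const prevTotal = 10056133; // total= at time t
const currTotal = 10064133; // total= at time t + 1s
console.log(stallTimeMs(prevTotal, currTotal)); // 8 ms of full memory stall
```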
@@ -4190,18 +4248,25 @@ netdataDashboard.context = {
info: 'The number of I/O operations performed on specific devices as seen by the throttling policy.'
},
- 'cgroup.io_pressure': {
+ 'cgroup.io_some_pressure': {
info: 'I/O <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
- '<b>Some</b> indicates the share of time in which at least some tasks are stalled on I/O. '+
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ '<b>Some</b> indicates the share of time in which at least <b>some tasks</b> are stalled on I/O. ' +
+ 'In this state the CPU is still doing productive work. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'cgroup.io_some_pressure_stall_time': {
+ info: 'The amount of time some processes have been waiting due to I/O congestion.'
},
'cgroup.io_full_pressure': {
- info: 'I/O <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. '+
- '<b>Full</b> indicates the share of time in which all non-idle tasks are stalled on I/O simultaneously. '+
- 'In this state actual CPU cycles are going to waste, '+
- 'and a workload that spends extended time in this state is considered to be thrashing. '+
- 'The ratios (in %) are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ info: 'I/O <a href="https://www.kernel.org/doc/html/latest/accounting/psi.html" target="_blank">Pressure Stall Information</a>. ' +
+ '<b>Full</b> indicates the share of time in which <b>all non-idle tasks</b> are stalled on the I/O resource simultaneously. ' +
+ 'In this state actual CPU cycles are going to waste, and a workload that spends extended time in this state is considered to be thrashing. '+
+ 'This has a severe impact on performance. '+
+ 'The ratios are tracked as recent trends over 10-, 60-, and 300-second windows.'
+ },
+ 'cgroup.io_full_pressure_stall_time': {
+ info: 'The amount of time all non-idle processes have been stalled due to I/O congestion.'
},
'cgroup.swap_read': {
@@ -4931,7 +4996,7 @@ netdataDashboard.context = {
},
'web_log.clients_all': {
- info: 'Unique client IPs accessing the web server since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the web server. On very busy web servers (several millions of unique IPs) you may want to disable this chart (check <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ info: 'Unique client IPs accessing the web server since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the web server. On very busy web servers (several million unique IPs) you may want to disable this chart (check <a href="https://github.com/netdata/go.d.plugin/blob/master/config/go.d/web_log.conf" target="_blank"><code>/etc/netdata/go.d/web_log.conf</code></a>).'
},
// ------------------------------------------------------------------------
@@ -5062,7 +5127,7 @@ netdataDashboard.context = {
},
'web_log.squid_clients_all': {
- info: 'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several millions of unique IPs) you may want to disable this chart (check <a href="https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ info: 'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several million unique IPs) you may want to disable this chart (check <a href="https://github.com/netdata/go.d.plugin/blob/master/config/go.d/web_log.conf" target="_blank"><code>/etc/netdata/go.d/web_log.conf</code></a>).'
},
'web_log.squid_transport_methods': {