Diffstat (limited to 'monitoring/ceph-mixin/dashboards')
-rw-r--r--   monitoring/ceph-mixin/dashboards/cephfs.libsonnet    89
-rw-r--r--   monitoring/ceph-mixin/dashboards/host.libsonnet     723
-rw-r--r--   monitoring/ceph-mixin/dashboards/osd.libsonnet      593
-rw-r--r--   monitoring/ceph-mixin/dashboards/pool.libsonnet     552
-rw-r--r--   monitoring/ceph-mixin/dashboards/rbd.libsonnet      337
-rw-r--r--   monitoring/ceph-mixin/dashboards/rgw.libsonnet      872
-rw-r--r--   monitoring/ceph-mixin/dashboards/utils.libsonnet    333
7 files changed, 3499 insertions, 0 deletions
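
Each of the seven files below defines a Jsonnet object that maps a dashboard file name ('cephfs-overview.json', 'hosts-overview.json', and so on) to a Grafana dashboard built with the grafonnet library, layered on top of the shared helpers in utils.libsonnet. As a rough sketch of how such objects can be rendered into the JSON that Grafana consumes (the render.jsonnet entry point, its placement next to these files, and the vendor/output paths are illustrative, not part of this patch):

    // render.jsonnet -- hypothetical entry point placed alongside these libsonnet files
    local dashboards =
      (import 'cephfs.libsonnet') +
      (import 'host.libsonnet') +
      (import 'osd.libsonnet');
    {
      // emit one output file per dashboard key, skipping helper fields from utils.libsonnet
      [name]: dashboards[name]
      for name in std.objectFields(dashboards)
      if std.endsWith(name, '.json')
    }

Rendering with something like 'jsonnet -J vendor -m out render.jsonnet' (jsonnet's multi-file output mode) would then write one JSON dashboard per key.
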
diff --git a/monitoring/ceph-mixin/dashboards/cephfs.libsonnet b/monitoring/ceph-mixin/dashboards/cephfs.libsonnet
new file mode 100644
index 000000000..d12d9f4dd
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/cephfs.libsonnet
@@ -0,0 +1,89 @@
+local g = import 'grafonnet/grafana.libsonnet';
+
+(import 'utils.libsonnet') {
+ 'cephfs-overview.json':
+ $.dashboardSchema(
+ 'MDS Performance',
+ '',
+ 'tbO9LAiZz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags,
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.3.2'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('mds_servers',
+ '$datasource',
+ 'label_values(ceph_mds_inodes{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ 'MDS Server',
+ '')
+ )
+ .addPanels([
+ $.addRowSchema(false, true, 'MDS Performance') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } },
+ $.simpleGraphPanel(
+ {},
+ 'MDS Workload - $mds_servers',
+ '',
+ 'none',
+        'Reads (-) / Writes (+)',
+ 0,
+ 'sum(rate(ceph_objecter_op_r{%(matchers)s, ceph_daemon=~"($mds_servers).*"}[$__rate_interval]))' % $.matchers(),
+ 'Read Ops',
+ 0,
+ 1,
+ 12,
+ 9
+ )
+ .addTarget($.addTargetSchema(
+ 'sum(rate(ceph_objecter_op_w{%(matchers)s, ceph_daemon=~"($mds_servers).*"}[$__rate_interval]))' % $.matchers(),
+ 'Write Ops'
+ ))
+ .addSeriesOverride(
+ { alias: '/.*Reads/', transform: 'negative-Y' }
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Client Request Load - $mds_servers',
+ '',
+ 'none',
+ 'Client Requests',
+ 0,
+ 'ceph_mds_server_handle_client_request{%(matchers)s, ceph_daemon=~"($mds_servers).*"}' % $.matchers(),
+ '{{ceph_daemon}}',
+ 12,
+ 1,
+ 12,
+ 9
+ ),
+ ]),
+}
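
A note on the '%(matchers)s' placeholders that appear in the PromQL strings above and throughout the remaining files: $.matchers() comes from utils.libsonnet (whose contents are not shown in this excerpt) and returns an object of label-selector fragments, and Jsonnet's % operator substitutes them into the query string, much like Python's %-formatting with named keys. A minimal standalone sketch of the mechanism, with made-up matcher values rather than the ones utils.libsonnet actually produces:

    // matchers-demo.jsonnet -- illustrative only
    local matchers = { matchers: 'job=~"$job"', clusterMatcher: 'cluster=~"$cluster"' };
    {
      expr: 'sum(rate(ceph_objecter_op_r{%(matchers)s}[$__rate_interval]))' % matchers,
      // renders as: sum(rate(ceph_objecter_op_r{job=~"$job"}[$__rate_interval]))
    }
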
diff --git a/monitoring/ceph-mixin/dashboards/host.libsonnet b/monitoring/ceph-mixin/dashboards/host.libsonnet
new file mode 100644
index 000000000..3e0b31f2c
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/host.libsonnet
@@ -0,0 +1,723 @@
+local g = import 'grafonnet/grafana.libsonnet';
+
+(import 'utils.libsonnet') {
+ 'hosts-overview.json':
+ $.dashboardSchema(
+ 'Host Overview',
+ '',
+ 'y0KGL0iZz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags,
+ '',
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.3.2'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='singlestat', name='Singlestat', version='5.0.0'
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addTemplate(
+ g.template.datasource('datasource',
+ 'prometheus',
+ 'default',
+ label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('osd_hosts',
+ '$datasource',
+ 'label_values(ceph_disk_occupation{%(matchers)s}, exported_instance)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ null,
+ '([^.]*).*')
+ )
+ .addTemplate(
+ $.addTemplateSchema('mon_hosts',
+ '$datasource',
+ 'label_values(ceph_mon_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ null,
+ 'mon.(.*)')
+ )
+ .addTemplate(
+ $.addTemplateSchema('mds_hosts',
+ '$datasource',
+ 'label_values(ceph_mds_inodes{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ null,
+ 'mds.(.*)')
+ )
+ .addTemplate(
+ $.addTemplateSchema('rgw_hosts',
+ '$datasource',
+ 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ null,
+ 'rgw.(.*)')
+ )
+ .addPanels([
+ $.simpleSingleStatPanel(
+ 'none',
+ 'OSD Hosts',
+ '',
+ 'current',
+ 'count(sum by (hostname) (ceph_osd_metadata{%(matchers)s}))' % $.matchers(),
+ true,
+ 'time_series',
+ 0,
+ 0,
+ 4,
+ 5
+ ),
+ $.simpleSingleStatPanel(
+ 'percentunit',
+ 'AVG CPU Busy',
+        'Average CPU busy across all hosts (OSD, RGW, MON, etc.) within the cluster',
+ 'current',
+ |||
+ avg(1 - (
+ avg by(instance) (
+ rate(node_cpu_seconds_total{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval]) or
+ rate(node_cpu{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval])
+ )
+ ))
+ |||,
+ true,
+ 'time_series',
+ 4,
+ 0,
+ 4,
+ 5
+ ),
+ $.simpleSingleStatPanel(
+ 'percentunit',
+ 'AVG RAM Utilization',
+ 'Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)',
+ 'current',
+ |||
+ avg ((
+ (
+ node_memory_MemTotal{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or
+ node_memory_MemTotal_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}
+ ) - ((
+ node_memory_MemFree{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or
+ node_memory_MemFree_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}) +
+ (
+ node_memory_Cached{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or
+ node_memory_Cached_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}
+ ) + (
+ node_memory_Buffers{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or
+ node_memory_Buffers_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}
+ ) + (
+ node_memory_Slab{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or
+ node_memory_Slab_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}
+ )
+ )
+ ) / (
+ node_memory_MemTotal{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or
+ node_memory_MemTotal_bytes{instance=~"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*"}
+ ))
+ |||,
+ true,
+ 'time_series',
+ 8,
+ 0,
+ 4,
+ 5
+ ),
+ $.simpleSingleStatPanel(
+ 'none',
+ 'Physical IOPS',
+ 'IOPS Load at the device as reported by the OS on all OSD hosts',
+ 'current',
+ |||
+ sum ((
+ rate(node_disk_reads_completed{instance=~"($osd_hosts).*"}[$__rate_interval]) or
+ rate(node_disk_reads_completed_total{instance=~"($osd_hosts).*"}[$__rate_interval])
+ ) + (
+ rate(node_disk_writes_completed{instance=~"($osd_hosts).*"}[$__rate_interval]) or
+ rate(node_disk_writes_completed_total{instance=~"($osd_hosts).*"}[$__rate_interval])
+ ))
+ |||,
+ true,
+ 'time_series',
+ 12,
+ 0,
+ 4,
+ 5
+ ),
+ $.simpleSingleStatPanel(
+ 'percent',
+ 'AVG Disk Utilization',
+ 'Average Disk utilization for all OSD data devices (i.e. excludes journal/WAL)',
+ 'current',
+ |||
+ avg (
+ label_replace(
+ (rate(node_disk_io_time_ms[$__rate_interval]) / 10 ) or
+ (rate(node_disk_io_time_seconds_total[$__rate_interval]) * 100),
+ "instance", "$1", "instance", "([^.:]*).*"
+ ) * on(instance, device) group_left(ceph_daemon) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, instance=~"($osd_hosts).*"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^.:]*).*"
+ )
+ )
+ ||| % $.matchers(),
+ true,
+ 'time_series',
+ 16,
+ 0,
+ 4,
+ 5
+ ),
+ $.simpleSingleStatPanel(
+ 'bytes',
+ 'Network Load',
+ 'Total send/receive network load across all hosts in the ceph cluster',
+ 'current',
+ |||
+ sum (
+ (
+              rate(node_network_receive_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) or
+              rate(node_network_receive_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval])
+ ) unless on (device, instance)
+ label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)")
+ ) +
+ sum (
+ (
+              rate(node_network_transmit_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) or
+              rate(node_network_transmit_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval])
+ ) unless on (device, instance)
+ label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)")
+ )
+ |||,
+ true,
+ 'time_series',
+ 20,
+ 0,
+ 4,
+ 5
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'CPU Busy - Top 10 Hosts',
+        'Shows the top 10 busiest hosts by CPU',
+ 'percent',
+ null,
+ 0,
+ |||
+ topk(10,
+ 100 * (
+ 1 - (
+ avg by(instance) (
+ rate(node_cpu_seconds_total{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval]) or
+ rate(node_cpu{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval])
+ )
+ )
+ )
+ )
+ |||,
+ '{{instance}}',
+ 0,
+ 5,
+ 12,
+ 9
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Network Load - Top 10 Hosts',
+ 'Top 10 hosts by network load',
+ 'Bps',
+ null,
+ 0,
+ |||
+ topk(10, (sum by(instance) (
+ (
+ rate(node_network_receive_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) or
+ rate(node_network_receive_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval])
+ ) +
+ (
+ rate(node_network_transmit_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) or
+ rate(node_network_transmit_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval])
+ ) unless on (device, instance)
+ label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)"))
+ ))
+ |||,
+ '{{instance}}',
+ 12,
+ 5,
+ 12,
+ 9
+ ),
+ ]),
+ 'host-details.json':
+ $.dashboardSchema(
+ 'Host Details',
+ '',
+ 'rtOg0AiWz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags + ['overview'],
+ ''
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.3.2'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='singlestat', name='Singlestat', version='5.0.0'
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard'
+ )
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('ceph_hosts',
+ '$datasource',
+ 'label_values({%(clusterMatcher)s}, instance)' % $.matchers(),
+ 1,
+ false,
+ 3,
+ 'Hostname',
+ '([^.:]*).*')
+ )
+ .addPanels([
+ $.addRowSchema(false, true, '$ceph_hosts System Overview') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } },
+ $.simpleSingleStatPanel(
+ 'none',
+ 'OSDs',
+ '',
+ 'current',
+ "count(sum by (ceph_daemon) (ceph_osd_metadata{%(matchers)s, hostname='$ceph_hosts'}))" % $.matchers(),
+ null,
+ 'time_series',
+ 0,
+ 1,
+ 3,
+ 5
+ ),
+ $.simpleGraphPanel(
+ {
+ interrupt: '#447EBC',
+ steal: '#6D1F62',
+ system: '#890F02',
+ user: '#3F6833',
+ wait: '#C15C17',
+ },
+ 'CPU Utilization',
+        "Shows the CPU breakdown. When multiple servers are selected, only the first host's CPU data is shown",
+ 'percent',
+ '% Utilization',
+ null,
+ |||
+ sum by (mode) (
+ rate(node_cpu{instance=~"($ceph_hosts)([\\\\.:].*)?", mode=~"(irq|nice|softirq|steal|system|user|iowait)"}[$__rate_interval]) or
+ rate(node_cpu_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?", mode=~"(irq|nice|softirq|steal|system|user|iowait)"}[$__rate_interval])
+ ) / (
+ scalar(
+ sum(rate(node_cpu{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_cpu_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]))
+ ) * 100
+ )
+ |||,
+ '{{mode}}',
+ 3,
+ 1,
+ 6,
+ 10
+ ),
+ $.simpleGraphPanel(
+ {
+ Available: '#508642',
+ Free: '#508642',
+ Total: '#bf1b00',
+ Used: '#bf1b00',
+ total: '#bf1b00',
+ used: '#0a50a1',
+ },
+ 'RAM Usage',
+ '',
+ 'bytes',
+ 'RAM used',
+ null,
+ |||
+ node_memory_MemFree{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_MemFree_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ |||,
+ 'Free',
+ 9,
+ 1,
+ 6,
+ 10
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ node_memory_MemTotal{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_MemTotal_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ |||,
+ 'total'
+ ),
+ $.addTargetSchema(
+ |||
+ (
+ node_memory_Cached{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_Cached_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ ) + (
+ node_memory_Buffers{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_Buffers_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ ) + (
+ node_memory_Slab{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_Slab_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ )
+ |||,
+ 'buffers/cache'
+ ),
+ $.addTargetSchema(
+ |||
+ (
+ node_memory_MemTotal{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_MemTotal_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ ) - (
+ (
+ node_memory_MemFree{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_MemFree_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ ) + (
+ node_memory_Cached{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_Cached_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ ) + (
+ node_memory_Buffers{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_Buffers_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ ) +
+ (
+ node_memory_Slab{instance=~"$ceph_hosts([\\\\.:].*)?"} or
+ node_memory_Slab_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}
+ )
+ )
+ |||,
+ 'used'
+ ),
+ ]
+ )
+ .addSeriesOverride(
+ {
+ alias: 'total',
+ color: '#bf1b00',
+ fill: 0,
+ linewidth: 2,
+ stack: false,
+ }
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Network Load',
+        "Shows the network load (rx, tx) across all interfaces (excluding loopback 'lo')",
+ 'decbytes',
+ 'Send (-) / Receive (+)',
+ null,
+ |||
+ sum by (device) (
+              rate(node_network_receive_bytes{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval]) or
+              rate(node_network_receive_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval])
+ )
+ |||,
+ '{{device}}.rx',
+ 15,
+ 1,
+ 6,
+ 10
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ sum by (device) (
+ rate(node_network_transmit_bytes{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval]) or
+ rate(node_network_transmit_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval])
+ )
+ |||,
+ '{{device}}.tx'
+ ),
+ ]
+ )
+ .addSeriesOverride(
+ { alias: '/.*tx/', transform: 'negative-Y' }
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Network drop rate',
+ '',
+ 'pps',
+ 'Send (-) / Receive (+)',
+ null,
+ |||
+ rate(node_network_receive_drop{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_network_receive_drop_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval])
+ |||,
+ '{{device}}.rx',
+ 21,
+ 1,
+ 3,
+ 5
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ rate(node_network_transmit_drop{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_network_transmit_drop_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval])
+ |||,
+ '{{device}}.tx'
+ ),
+ ]
+ )
+ .addSeriesOverride(
+ {
+ alias: '/.*tx/',
+ transform: 'negative-Y',
+ }
+ ),
+ $.simpleSingleStatPanel(
+ 'bytes',
+ 'Raw Capacity',
+ 'Each OSD consists of a Journal/WAL partition and a data partition. The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.',
+ 'current',
+ |||
+ sum(
+ ceph_osd_stat_bytes{%(matchers)s} and
+ on (ceph_daemon) ceph_disk_occupation{%(matchers)s, instance=~"($ceph_hosts)([\\\\.:].*)?"}
+ )
+ ||| % $.matchers(),
+ null,
+ 'time_series',
+ 0,
+ 6,
+ 3,
+ 5
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Network error rate',
+ '',
+ 'pps',
+ 'Send (-) / Receive (+)',
+ null,
+ |||
+ rate(node_network_receive_errs{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_network_receive_errs_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval])
+ |||,
+ '{{device}}.rx',
+ 21,
+ 6,
+ 3,
+ 5
+ )
+ .addTargets(
+ [$.addTargetSchema(
+ |||
+ rate(node_network_transmit_errs{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_network_transmit_errs_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval])
+ |||,
+ '{{device}}.tx'
+ )]
+ )
+ .addSeriesOverride(
+ {
+ alias: '/.*tx/',
+ transform: 'negative-Y',
+ }
+ ),
+ $.addRowSchema(false,
+ true,
+ 'OSD Disk Performance Statistics') + { gridPos: { x: 0, y: 11, w: 24, h: 1 } },
+ $.simpleGraphPanel(
+ {},
+ '$ceph_hosts Disk IOPS',
+        "For any OSD devices on the host, this chart shows the IOPS per physical device. Each device is shown by its name and corresponding OSD ID value",
+ 'ops',
+ 'Read (-) / Write (+)',
+ null,
+ |||
+ label_replace(
+ (
+ rate(node_disk_writes_completed{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_disk_writes_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ ) * on(instance, device) group_left(ceph_daemon) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s}, "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}}({{ceph_daemon}}) writes',
+ 0,
+ 12,
+ 11,
+ 9
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ label_replace(
+ (
+ rate(node_disk_reads_completed{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_disk_reads_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ ) * on(instance, device) group_left(ceph_daemon) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s},"device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}}({{ceph_daemon}}) reads'
+ ),
+ ]
+ )
+ .addSeriesOverride(
+ { alias: '/.*reads/', transform: 'negative-Y' }
+ ),
+ $.simpleGraphPanel(
+ {},
+ '$ceph_hosts Throughput by Disk',
+        'For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. Each device is shown by device name and corresponding OSD ID',
+ 'Bps',
+ 'Read (-) / Write (+)',
+ null,
+ |||
+ label_replace(
+ (
+ rate(node_disk_bytes_written{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_disk_written_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])
+ ), "instance", "$1", "instance", "([^:.]*).*") * on(instance, device)
+ group_left(ceph_daemon) label_replace(
+ label_replace(ceph_disk_occupation_human{%(matchers)s}, "device", "$1", "device", "/dev/(.*)"),
+ "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}}({{ceph_daemon}}) write',
+ 12,
+ 12,
+ 11,
+ 9
+ )
+ .addTargets(
+ [$.addTargetSchema(
+ |||
+ label_replace(
+ (
+ rate(node_disk_bytes_read{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or
+ rate(node_disk_read_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])
+ ),
+ "instance", "$1", "instance", "([^:.]*).*") * on(instance, device)
+ group_left(ceph_daemon) label_replace(
+ label_replace(ceph_disk_occupation_human{%(matchers)s}, "device", "$1", "device", "/dev/(.*)"),
+ "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}}({{ceph_daemon}}) read'
+ )]
+ )
+ .addSeriesOverride(
+ { alias: '/.*read/', transform: 'negative-Y' }
+ ),
+ $.simpleGraphPanel(
+ {},
+ '$ceph_hosts Disk Latency',
+        "For OSD hosts, this chart shows the latency at the physical drive. Each drive is shown by device name, with its corresponding OSD ID",
+ 's',
+ '',
+ null,
+ |||
+ max by(instance, device) (label_replace(
+ (rate(node_disk_write_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])) /
+ clamp_min(rate(node_disk_writes_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]), 0.001) or
+ (rate(node_disk_read_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])) /
+ clamp_min(rate(node_disk_reads_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]), 0.001),
+ "instance", "$1", "instance", "([^:.]*).*"
+ )) * on(instance, device) group_left(ceph_daemon) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{instance=~"($ceph_hosts)([\\\\.:].*)?"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}}({{ceph_daemon}})',
+ 0,
+ 21,
+ 11,
+ 9
+ ),
+ $.simpleGraphPanel(
+ {},
+ '$ceph_hosts Disk utilization',
+        'Shows disk utilization % (util) of any OSD devices on the host, by physical device name and associated OSD ID.',
+ 'percent',
+ '%Util',
+ null,
+ |||
+ label_replace(
+ (
+ (rate(node_disk_io_time_ms{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) / 10) or
+ rate(node_disk_io_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) * 100
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ ) * on(instance, device) group_left(ceph_daemon) label_replace(
+ label_replace(ceph_disk_occupation_human{%(matchers)s, instance=~"($ceph_hosts)([\\\\.:].*)?"},
+ "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}}({{ceph_daemon}})',
+ 12,
+ 21,
+ 11,
+ 9
+ ),
+ ]),
+}
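
A recurring pattern in the host queries above is pairing an older node_exporter metric name with its newer counterpart via 'or', for example node_memory_MemTotal or node_memory_MemTotal_bytes, and node_disk_reads_completed or node_disk_reads_completed_total. node_exporter renamed these series (around the 0.16 release) to include base units in the metric name; the 'or' lets a single expression work against either naming scheme, since whichever series exists is returned and the absent one contributes nothing.
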
diff --git a/monitoring/ceph-mixin/dashboards/osd.libsonnet b/monitoring/ceph-mixin/dashboards/osd.libsonnet
new file mode 100644
index 000000000..129b74ba6
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/osd.libsonnet
@@ -0,0 +1,593 @@
+local g = import 'grafonnet/grafana.libsonnet';
+
+(import 'utils.libsonnet') {
+ 'osds-overview.json':
+ $.dashboardSchema(
+ 'OSD Overview',
+ '',
+ 'lo02I1Aiz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags,
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='grafana-piechart-panel', name='Pie Chart', version='1.3.3'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='table', name='Table', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addPanels([
+ $.simpleGraphPanel(
+ { '@95%ile': '#e0752d' },
+ 'OSD Read Latencies',
+ '',
+ 'ms',
+ null,
+ '0',
+ |||
+ avg (
+ rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) * 1000
+ )
+ ||| % $.matchers(),
+ 'AVG read',
+ 0,
+ 0,
+ 8,
+ 8
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ max(
+ rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) * 1000
+ )
+ ||| % $.matchers(),
+ 'MAX read'
+ ),
+ $.addTargetSchema(
+ |||
+ quantile(0.95,
+ (
+ rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval])
+ * 1000
+ )
+ )
+ ||| % $.matchers(),
+ '@95%ile'
+ ),
+ ],
+ ),
+ $.addTableSchema(
+ '$datasource',
+        "This table shows the OSDs that are delivering the 10 highest read latencies within the cluster",
+ { col: 2, desc: true },
+ [
+ $.overviewStyle('OSD ID', 'ceph_daemon', 'string', 'short'),
+ $.overviewStyle('Latency (ms)', 'Value', 'number', 'none'),
+ $.overviewStyle('', '/.*/', 'hidden', 'short'),
+ ],
+ 'Highest READ Latencies',
+ 'table'
+ )
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ topk(10,
+ (sort(
+ (
+ rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) *
+ 1000
+ )
+ ))
+ )
+ ||| % $.matchers(),
+ '',
+ 'table',
+ 1,
+ true
+ )
+ ) + { gridPos: { x: 8, y: 0, w: 4, h: 8 } },
+ $.simpleGraphPanel(
+ {
+ '@95%ile write': '#e0752d',
+ },
+ 'OSD Write Latencies',
+ '',
+ 'ms',
+ null,
+ '0',
+ |||
+ avg(
+ rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval])
+ * 1000
+ )
+ ||| % $.matchers(),
+ 'AVG write',
+ 12,
+ 0,
+ 8,
+ 8
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ max(
+ rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) *
+ 1000
+ )
+ ||| % $.matchers(), 'MAX write'
+ ),
+ $.addTargetSchema(
+ |||
+ quantile(0.95, (
+ rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) *
+ 1000
+ ))
+ ||| % $.matchers(), '@95%ile write'
+ ),
+ ],
+ ),
+ $.addTableSchema(
+ '$datasource',
+        "This table shows the OSDs that are delivering the 10 highest write latencies within the cluster",
+ { col: 2, desc: true },
+ [
+ $.overviewStyle(
+ 'OSD ID', 'ceph_daemon', 'string', 'short'
+ ),
+ $.overviewStyle('Latency (ms)', 'Value', 'number', 'none'),
+ $.overviewStyle('', '/.*/', 'hidden', 'short'),
+ ],
+ 'Highest WRITE Latencies',
+ 'table'
+ )
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ topk(10,
+ (sort(
+ (rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) *
+ 1000)
+ ))
+ )
+ ||| % $.matchers(),
+ '',
+ 'table',
+ 1,
+ true
+ )
+ ) + { gridPos: { x: 20, y: 0, w: 4, h: 8 } },
+ $.simplePieChart(
+ {}, '', 'OSD Types Summary'
+ )
+ .addTarget(
+ $.addTargetSchema('count by (device_class) (ceph_osd_metadata{%(matchers)s})' % $.matchers(), '{{device_class}}')
+ ) + { gridPos: { x: 0, y: 8, w: 4, h: 8 } },
+ $.simplePieChart(
+ { 'Non-Encrypted': '#E5AC0E' }, '', 'OSD Objectstore Types'
+ )
+ .addTarget(
+ $.addTargetSchema(
+ 'count(ceph_bluefs_wal_total_bytes{%(matchers)s})' % $.matchers(), 'bluestore', 'time_series', 2
+ )
+ )
+ .addTarget(
+ $.addTargetSchema(
+ 'absent(ceph_bluefs_wal_total_bytes{%(matchers)s}) * count(ceph_osd_metadata{%(matchers)s})' % $.matchers(), 'filestore', 'time_series', 2
+ )
+ ) + { gridPos: { x: 4, y: 8, w: 4, h: 8 } },
+ $.simplePieChart(
+ {}, 'The pie chart shows the various OSD sizes used within the cluster', 'OSD Size Summary'
+ )
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} < 1099511627776)' % $.matchers(), '<1TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 1099511627776 < 2199023255552)' % $.matchers(), '<2TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 2199023255552 < 3298534883328)' % $.matchers(), '<3TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 3298534883328 < 4398046511104)' % $.matchers(), '<4TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 4398046511104 < 6597069766656)' % $.matchers(), '<6TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 6597069766656 < 8796093022208)' % $.matchers(), '<8TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 8796093022208 < 10995116277760)' % $.matchers(), '<10TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+ 'count(ceph_osd_stat_bytes{%(matchers)s} >= 10995116277760 < 13194139533312)' % $.matchers(), '<12TB', 'time_series', 2
+ ))
+ .addTarget($.addTargetSchema(
+        'count(ceph_osd_stat_bytes{%(matchers)s} >= 13194139533312)' % $.matchers(), '12TB+', 'time_series', 2
+ )) + { gridPos: { x: 8, y: 8, w: 4, h: 8 } },
+ g.graphPanel.new(bars=true,
+ datasource='$datasource',
+ title='Distribution of PGs per OSD',
+ x_axis_buckets=20,
+ x_axis_mode='histogram',
+ x_axis_values=['total'],
+ formatY1='short',
+ formatY2='short',
+ labelY1='# of OSDs',
+ min='0',
+ nullPointMode='null')
+ .addTarget($.addTargetSchema(
+ 'ceph_osd_numpg{%(matchers)s}' % $.matchers(), 'PGs per OSD', 'time_series', 1, true
+ )) + { gridPos: { x: 12, y: 8, w: 8, h: 8 } },
+ $.gaugeSingleStatPanel(
+ 'percentunit',
+ 'OSD onode Hits Ratio',
+        'This gauge panel shows the onode hit ratio, to help determine whether increasing RAM per OSD could improve the performance of the cluster',
+ 'current',
+ true,
+ 1,
+ true,
+ false,
+ '.75',
+ |||
+ sum(ceph_bluestore_onode_hits{%(matchers)s}) / (
+ sum(ceph_bluestore_onode_hits{%(matchers)s}) +
+ sum(ceph_bluestore_onode_misses{%(matchers)s})
+ )
+ ||| % $.matchers(),
+ 'time_series',
+ 20,
+ 8,
+ 4,
+ 8
+ ),
+ $.addRowSchema(false,
+ true,
+ 'R/W Profile') + { gridPos: { x: 0, y: 16, w: 24, h: 1 } },
+ $.simpleGraphPanel(
+ {},
+ 'Read/Write Profile',
+        'Shows the read/write workload profile over time',
+ 'short',
+ null,
+ null,
+ 'round(sum(rate(ceph_pool_rd{%(matchers)s}[$__rate_interval])))' % $.matchers(),
+ 'Reads',
+ 0,
+ 17,
+ 24,
+ 8
+ )
+ .addTargets([$.addTargetSchema(
+ 'round(sum(rate(ceph_pool_wr{%(matchers)s}[$__rate_interval])))' % $.matchers(), 'Writes'
+ )]),
+ ]),
+ 'osd-device-details.json':
+ local OsdDeviceDetailsPanel(title,
+ description,
+ formatY1,
+ labelY1,
+ expr1,
+ expr2,
+ legendFormat1,
+ legendFormat2,
+ x,
+ y,
+ w,
+ h) =
+ $.graphPanelSchema({},
+ title,
+ description,
+ 'null',
+ false,
+ formatY1,
+ 'short',
+ labelY1,
+ null,
+ null,
+ 1,
+ '$datasource')
+ .addTargets(
+ [
+ $.addTargetSchema(expr1,
+ legendFormat1),
+ $.addTargetSchema(expr2, legendFormat2),
+ ]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } };
+
+ $.dashboardSchema(
+ 'OSD device details',
+ '',
+ 'CrAHE0iZz',
+ 'now-3h',
+ '30s',
+ 16,
+ $._config.dashboardTags,
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.3.2'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource',
+ 'prometheus',
+ 'default',
+ label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('osd',
+ '$datasource',
+ 'label_values(ceph_osd_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ false,
+ 1,
+ 'OSD',
+ '(.*)')
+ )
+ .addPanels([
+ $.addRowSchema(
+ false, true, 'OSD Performance'
+ ) + { gridPos: { x: 0, y: 0, w: 24, h: 1 } },
+ OsdDeviceDetailsPanel(
+ '$osd Latency',
+ '',
+ 's',
+ 'Read (-) / Write (+)',
+ |||
+ rate(ceph_osd_op_r_latency_sum{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval])
+ ||| % $.matchers(),
+ |||
+ rate(ceph_osd_op_w_latency_sum{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval]) /
+ on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval])
+ ||| % $.matchers(),
+ 'read',
+ 'write',
+ 0,
+ 1,
+ 6,
+ 9
+ )
+ .addSeriesOverride(
+ {
+ alias: 'read',
+ transform: 'negative-Y',
+ }
+ ),
+ OsdDeviceDetailsPanel(
+ '$osd R/W IOPS',
+ '',
+ 'short',
+ 'Read (-) / Write (+)',
+ 'rate(ceph_osd_op_r{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(),
+ 'rate(ceph_osd_op_w{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(),
+ 'Reads',
+ 'Writes',
+ 6,
+ 1,
+ 6,
+ 9
+ )
+ .addSeriesOverride(
+ { alias: 'Reads', transform: 'negative-Y' }
+ ),
+ OsdDeviceDetailsPanel(
+ '$osd R/W Bytes',
+ '',
+ 'bytes',
+ 'Read (-) / Write (+)',
+ 'rate(ceph_osd_op_r_out_bytes{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(),
+ 'rate(ceph_osd_op_w_in_bytes{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(),
+ 'Read Bytes',
+ 'Write Bytes',
+ 12,
+ 1,
+ 6,
+ 9
+ )
+ .addSeriesOverride({ alias: 'Read Bytes', transform: 'negative-Y' }),
+ $.addRowSchema(
+ false, true, 'Physical Device Performance'
+ ) + { gridPos: { x: 0, y: 10, w: 24, h: 1 } },
+ OsdDeviceDetailsPanel(
+ 'Physical Device Latency for $osd',
+ '',
+ 's',
+ 'Read (-) / Write (+)',
+ |||
+ (
+ label_replace(
+ rate(node_disk_read_time_seconds_total{%(clusterMatcher)s}[$__rate_interval]) /
+ rate(node_disk_reads_completed_total{%(clusterMatcher)s}[$__rate_interval]),
+ "instance", "$1", "instance", "([^:.]*).*"
+ ) and on (instance, device) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ )
+ ||| % $.matchers(),
+ |||
+ (
+ label_replace(
+ rate(node_disk_write_time_seconds_total{%(clusterMatcher)s}[$__rate_interval]) /
+ rate(node_disk_writes_completed_total{%(clusterMatcher)s}[$__rate_interval]),
+ "instance", "$1", "instance", "([^:.]*).*") and on (instance, device)
+ label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ )
+ ||| % $.matchers(),
+ '{{instance}}/{{device}} Reads',
+ '{{instance}}/{{device}} Writes',
+ 0,
+ 11,
+ 6,
+ 9
+ )
+ .addSeriesOverride(
+ { alias: '/.*Reads/', transform: 'negative-Y' }
+ ),
+ OsdDeviceDetailsPanel(
+ 'Physical Device R/W IOPS for $osd',
+ '',
+ 'short',
+ 'Read (-) / Write (+)',
+ |||
+ label_replace(
+ rate(node_disk_writes_completed_total{%(clusterMatcher)s}[$__rate_interval]),
+ "instance", "$1", "instance", "([^:.]*).*"
+ ) and on (instance, device) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ |||
+ label_replace(
+ rate(node_disk_reads_completed_total{%(clusterMatcher)s}[$__rate_interval]),
+ "instance", "$1", "instance", "([^:.]*).*"
+ ) and on (instance, device) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}} on {{instance}} Writes',
+ '{{device}} on {{instance}} Reads',
+ 6,
+ 11,
+ 6,
+ 9
+ )
+ .addSeriesOverride(
+ { alias: '/.*Reads/', transform: 'negative-Y' }
+ ),
+ OsdDeviceDetailsPanel(
+ 'Physical Device R/W Bytes for $osd',
+ '',
+ 'Bps',
+ 'Read (-) / Write (+)',
+ |||
+ label_replace(
+ rate(node_disk_read_bytes_total{%(clusterMatcher)s}[$__rate_interval]), "instance", "$1", "instance", "([^:.]*).*"
+ ) and on (instance, device) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ |||
+ label_replace(
+ rate(node_disk_written_bytes_total{%(clusterMatcher)s}[$__rate_interval]), "instance", "$1", "instance", "([^:.]*).*"
+ ) and on (instance, device) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"},
+ "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{instance}} {{device}} Reads',
+ '{{instance}} {{device}} Writes',
+ 12,
+ 11,
+ 6,
+ 9
+ )
+ .addSeriesOverride(
+ { alias: '/.*Reads/', transform: 'negative-Y' }
+ ),
+ $.graphPanelSchema(
+ {},
+ 'Physical Device Util% for $osd',
+ '',
+ 'null',
+ false,
+ 'percentunit',
+ 'short',
+ null,
+ null,
+ null,
+ 1,
+ '$datasource'
+ )
+ .addTarget($.addTargetSchema(
+ |||
+ label_replace(
+ rate(node_disk_io_time_seconds_total{%(clusterMatcher)s}[$__rate_interval]),
+ "instance", "$1", "instance", "([^:.]*).*"
+ ) and on (instance, device) label_replace(
+ label_replace(
+ ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"
+ ), "instance", "$1", "instance", "([^:.]*).*"
+ )
+ ||| % $.matchers(),
+ '{{device}} on {{instance}}'
+ )) + { gridPos: { x: 18, y: 11, w: 6, h: 9 } },
+ ]),
+}
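
The latency panels above all use the same counter-ratio shape: rate(<op>_latency_sum[...]) is the latency accumulated per second (in seconds), rate(<op>_latency_count[...]) is the number of operations per second, so their quotient is the average latency per operation over the rate interval, and the trailing * 1000 converts seconds to milliseconds for the 'ms' axis. As a worked example, if an OSD accumulates 0.9 s of read latency across 300 read operations during the interval, the panel reports 0.9 / 300 * 1000 = 3 ms.
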
diff --git a/monitoring/ceph-mixin/dashboards/pool.libsonnet b/monitoring/ceph-mixin/dashboards/pool.libsonnet
new file mode 100644
index 000000000..6444335d9
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/pool.libsonnet
@@ -0,0 +1,552 @@
+local g = import 'grafonnet/grafana.libsonnet';
+
+(import 'utils.libsonnet') {
+ 'pool-overview.json':
+ $.dashboardSchema(
+ 'Ceph Pools Overview',
+ '',
+ 'z99hzWtmk',
+ 'now-1h',
+ '30s',
+ 22,
+ $._config.dashboardTags,
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ g.template.custom(label='TopK',
+ name='topk',
+ current='15',
+ query='15')
+ )
+ .addPanels([
+ $.simpleSingleStatPanel(
+ 'none',
+ 'Pools',
+ '',
+ 'avg',
+ 'count(ceph_pool_metadata{%(matchers)s})' % $.matchers(),
+ true,
+ 'table',
+ 0,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'none',
+ 'Pools with Compression',
+ 'Count of the pools that have compression enabled',
+ 'current',
+ 'count(ceph_pool_metadata{%(matchers)s, compression_mode!="none"})' % $.matchers(),
+ null,
+ '',
+ 3,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'bytes',
+ 'Total Raw Capacity',
+ 'Total raw capacity available to the cluster',
+ 'current',
+ 'sum(ceph_osd_stat_bytes{%(matchers)s})' % $.matchers(),
+ null,
+ '',
+ 6,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'bytes',
+ 'Raw Capacity Consumed',
+ 'Total raw capacity consumed by user data and associated overheads (metadata + redundancy)',
+ 'current',
+ 'sum(ceph_pool_bytes_used{%(matchers)s})' % $.matchers(),
+ true,
+ '',
+ 9,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'bytes',
+        'Logical Stored',
+        'Total client data stored in the cluster',
+ 'current',
+ 'sum(ceph_pool_stored{%(matchers)s})' % $.matchers(),
+ true,
+ '',
+ 12,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'bytes',
+ 'Compression Savings',
+ 'A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression',
+ 'current',
+ |||
+ sum(
+ ceph_pool_compress_under_bytes{%(matchers)s} -
+ ceph_pool_compress_bytes_used{%(matchers)s}
+ )
+ ||| % $.matchers(),
+ null,
+ '',
+ 15,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'percent',
+ 'Compression Eligibility',
+        'Indicates how suitable the data is within the pools that are/have been enabled for compression, averaged across all pools holding compressed data',
+ 'current',
+ |||
+ (
+ sum(ceph_pool_compress_under_bytes{%(matchers)s} > 0) /
+ sum(ceph_pool_stored_raw{%(matchers)s} and ceph_pool_compress_under_bytes{%(matchers)s} > 0)
+ ) * 100
+ ||| % $.matchers(),
+ null,
+ 'table',
+ 18,
+ 0,
+ 3,
+ 3
+ ),
+ $.simpleSingleStatPanel(
+ 'none',
+ 'Compression Factor',
+ 'This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. It does not account for data written that was ineligible for compression (too small, or compression yield too low)',
+ 'current',
+ |||
+ sum(
+ ceph_pool_compress_under_bytes{%(matchers)s} > 0)
+ / sum(ceph_pool_compress_bytes_used{%(matchers)s} > 0
+ )
+ ||| % $.matchers(),
+ null,
+ '',
+ 21,
+ 0,
+ 3,
+ 3
+ ),
+ $.addTableSchema(
+ '$datasource',
+ '',
+ { col: 5, desc: true },
+ [
+ $.overviewStyle('', 'Time', 'hidden', 'short'),
+ $.overviewStyle('', 'instance', 'hidden', 'short'),
+ $.overviewStyle('', 'job', 'hidden', 'short'),
+ $.overviewStyle('Pool Name', 'name', 'string', 'short'),
+ $.overviewStyle('Pool ID', 'pool_id', 'hidden', 'none'),
+ $.overviewStyle('Compression Factor', 'Value #A', 'number', 'none'),
+ $.overviewStyle('% Used', 'Value #D', 'number', 'percentunit', 'value', ['70', '85']),
+ $.overviewStyle('Usable Free', 'Value #B', 'number', 'bytes'),
+ $.overviewStyle('Compression Eligibility', 'Value #C', 'number', 'percent'),
+ $.overviewStyle('Compression Savings', 'Value #E', 'number', 'bytes'),
+ $.overviewStyle('Growth (5d)', 'Value #F', 'number', 'bytes', 'value', ['0', '0']),
+ $.overviewStyle('IOPS', 'Value #G', 'number', 'none'),
+ $.overviewStyle('Bandwidth', 'Value #H', 'number', 'Bps'),
+ $.overviewStyle('', '__name__', 'hidden', 'short'),
+ $.overviewStyle('', 'type', 'hidden', 'short'),
+ $.overviewStyle('', 'compression_mode', 'hidden', 'short'),
+ $.overviewStyle('Type', 'description', 'string', 'short'),
+ $.overviewStyle('Stored', 'Value #J', 'number', 'bytes'),
+ $.overviewStyle('', 'Value #I', 'hidden', 'short'),
+ $.overviewStyle('Compression', 'Value #K', 'string', 'short', null, [], [{ text: 'ON', value: '1' }]),
+ ],
+ 'Pool Overview',
+ 'table'
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ (
+ ceph_pool_compress_under_bytes{%(matchers)s} /
+ ceph_pool_compress_bytes_used{%(matchers)s} > 0
+ ) and on(pool_id) (
+ (
+ (ceph_pool_compress_under_bytes{%(matchers)s} > 0) /
+ ceph_pool_stored_raw{%(matchers)s}
+ ) * 100 > 0.5
+ )
+ ||| % $.matchers(),
+ 'A',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ |||
+ ceph_pool_max_avail{%(matchers)s} *
+ on(pool_id) group_left(name) ceph_pool_metadata{%(matchers)s}
+ ||| % $.matchers(),
+ 'B',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ |||
+ (
+ (ceph_pool_compress_under_bytes{%(matchers)s} > 0) /
+ ceph_pool_stored_raw{%(matchers)s}
+ ) * 100
+ ||| % $.matchers(),
+ 'C',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ |||
+ ceph_pool_percent_used{%(matchers)s} *
+ on(pool_id) group_left(name) ceph_pool_metadata{%(matchers)s}
+ ||| % $.matchers(),
+ 'D',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ |||
+ ceph_pool_compress_under_bytes{%(matchers)s} -
+ ceph_pool_compress_bytes_used{%(matchers)s} > 0
+ ||| % $.matchers(),
+ 'E',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ 'delta(ceph_pool_stored{%(matchers)s}[5d])' % $.matchers(), 'F', 'table', 1, true
+ ),
+ $.addTargetSchema(
+ |||
+ rate(ceph_pool_rd{%(matchers)s}[$__rate_interval])
+ + rate(ceph_pool_wr{%(matchers)s}[$__rate_interval])
+ ||| % $.matchers(),
+ 'G',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ |||
+ rate(ceph_pool_rd_bytes{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_pool_wr_bytes{%(matchers)s}[$__rate_interval])
+ ||| % $.matchers(),
+ 'H',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ 'ceph_pool_metadata{%(matchers)s}' % $.matchers(), 'I', 'table', 1, true
+ ),
+ $.addTargetSchema(
+ 'ceph_pool_stored{%(matchers)s} * on(pool_id) group_left ceph_pool_metadata{%(matchers)s}' % $.matchers(),
+ 'J',
+ 'table',
+ 1,
+ true
+ ),
+ $.addTargetSchema(
+ 'ceph_pool_metadata{%(matchers)s, compression_mode!="none"}' % $.matchers(), 'K', 'table', 1, true
+ ),
+ $.addTargetSchema('', 'L', '', '', null),
+ ]
+ ) + { gridPos: { x: 0, y: 3, w: 24, h: 6 } },
+ $.simpleGraphPanel(
+ {},
+ 'Top $topk Client IOPS by Pool',
+ 'This chart shows the sum of read and write IOPS from all clients by pool',
+ 'short',
+ 'IOPS',
+ 0,
+ |||
+ topk($topk,
+ round(
+ (
+ rate(ceph_pool_rd{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_pool_wr{%(matchers)s}[$__rate_interval])
+ ), 1
+ ) * on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s})
+ ||| % $.matchers(),
+ '{{name}} ',
+ 0,
+ 9,
+ 12,
+ 8
+ )
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ topk($topk,
+            rate(ceph_pool_wr{%(matchers)s}[$__rate_interval]) *
+ on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s}
+ )
+ ||| % $.matchers(),
+ '{{name}} - write'
+ )
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Top $topk Client Bandwidth by Pool',
+ 'The chart shows the sum of read and write bytes from all clients, by pool',
+ 'Bps',
+ 'Throughput',
+ 0,
+ |||
+ topk($topk,
+ (
+ rate(ceph_pool_rd_bytes{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_pool_wr_bytes{%(matchers)s}[$__rate_interval])
+ ) * on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s}
+ )
+ ||| % $.matchers(),
+ '{{name}}',
+ 12,
+ 9,
+ 12,
+ 8
+ ),
+ $.simpleGraphPanel(
+ {},
+ 'Pool Capacity Usage (RAW)',
+ 'Historical view of capacity usage, to help identify growth and trends in pool consumption',
+ 'bytes',
+ 'Capacity Used',
+ 0,
+ 'ceph_pool_bytes_used{%(matchers)s} * on(pool_id) group_right ceph_pool_metadata{%(matchers)s}' % $.matchers(),
+ '{{name}}',
+ 0,
+ 17,
+ 24,
+ 7
+ ),
+ ]),
+ 'pool-detail.json':
+ $.dashboardSchema(
+ 'Ceph Pool Details',
+ '',
+ '-xyV8KCiz',
+ 'now-1h',
+ '30s',
+ 22,
+ $._config.dashboardTags,
+ ''
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.3.2'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='singlestat', name='Singlestat', version='5.0.0'
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('pool_name',
+ '$datasource',
+ 'label_values(ceph_pool_metadata{%(matchers)s}, name)' % $.matchers(),
+ 1,
+ false,
+ 1,
+ 'Pool Name',
+ '')
+ )
+ .addPanels([
+ $.gaugeSingleStatPanel(
+ 'percentunit',
+ 'Capacity used',
+ '',
+ 'current',
+ true,
+ 1,
+ true,
+ true,
+ '.7,.8',
+ |||
+ (ceph_pool_stored{%(matchers)s} / (ceph_pool_stored{%(matchers)s} + ceph_pool_max_avail{%(matchers)s})) *
+ on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'time_series',
+ 0,
+ 0,
+ 7,
+ 7
+ ),
+ $.gaugeSingleStatPanel(
+ 's',
+ 'Time till full',
+ 'Time till pool is full assuming the average fill rate of the last 6 hours',
+ false,
+ 100,
+ false,
+ false,
+ '',
+ 'current',
+ |||
+ (ceph_pool_max_avail{%(matchers)s} / deriv(ceph_pool_stored{%(matchers)s}[6h])) *
+ on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} > 0
+ ||| % $.matchers(),
+ 'time_series',
+ 7,
+ 0,
+ 5,
+ 7
+ ),
+ $.simpleGraphPanel(
+ {
+        read_op_per_sec: '#3F6833',
+ write_op_per_sec: '#E5AC0E',
+ },
+ '$pool_name Object Ingress/Egress',
+ '',
+ 'ops',
+ 'Objects out(-) / in(+) ',
+ null,
+ |||
+ deriv(ceph_pool_objects{%(matchers)s}[1m]) *
+ on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'Objects per second',
+ 12,
+ 0,
+ 12,
+ 7
+ ),
+ $.simpleGraphPanel(
+ {
+ read_op_per_sec: '#3F6833',
+ write_op_per_sec: '#E5AC0E',
+ },
+ '$pool_name Client IOPS',
+ '',
+ 'iops',
+ 'Read (-) / Write (+)',
+ null,
+ |||
+ rate(ceph_pool_rd{%(matchers)s}[$__rate_interval]) *
+ on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'reads',
+ 0,
+ 7,
+ 12,
+ 7
+ )
+ .addSeriesOverride({ alias: 'reads', transform: 'negative-Y' })
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ rate(ceph_pool_wr{%(matchers)s}[$__rate_interval]) *
+ on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'writes'
+ )
+ ),
+ $.simpleGraphPanel(
+ {
+ read_op_per_sec: '#3F6833',
+ write_op_per_sec: '#E5AC0E',
+ },
+ '$pool_name Client Throughput',
+ '',
+ 'Bps',
+ 'Read (-) / Write (+)',
+ null,
+ |||
+          rate(ceph_pool_rd_bytes{%(matchers)s}[$__rate_interval]) *
+ on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'reads',
+ 12,
+ 7,
+ 12,
+ 7
+ )
+ .addSeriesOverride({ alias: 'reads', transform: 'negative-Y' })
+ .addTarget(
+ $.addTargetSchema(
+ |||
+              rate(ceph_pool_wr_bytes{%(matchers)s}[$__rate_interval]) *
+ on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'writes'
+ )
+ ),
+ $.simpleGraphPanel(
+ {
+ read_op_per_sec: '#3F6833',
+ write_op_per_sec: '#E5AC0E',
+ },
+ '$pool_name Objects',
+ '',
+ 'short',
+ 'Objects',
+ null,
+ |||
+ ceph_pool_objects{%(matchers)s} *
+ on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"}
+ ||| % $.matchers(),
+ 'Number of Objects',
+ 0,
+ 14,
+ 12,
+ 7
+ ),
+ ]),
+}
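
The 'Time till full' gauge above divides ceph_pool_max_avail (bytes still available to the pool) by deriv(ceph_pool_stored[6h]) (the average fill rate in bytes per second over the last 6 hours), giving the number of seconds until the pool is exhausted at the current trend; the trailing > 0 hides pools that are idle or shrinking. As a worked example, 2 TiB of headroom filling at 10 MiB/s gives 2,199,023,255,552 / 10,485,760 ≈ 210,000 s, or roughly 2.4 days.
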
diff --git a/monitoring/ceph-mixin/dashboards/rbd.libsonnet b/monitoring/ceph-mixin/dashboards/rbd.libsonnet
new file mode 100644
index 000000000..0eca5a877
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/rbd.libsonnet
@@ -0,0 +1,337 @@
+local g = import 'grafonnet/grafana.libsonnet';
+local u = import 'utils.libsonnet';
+
+(import 'utils.libsonnet') {
+ 'rbd-details.json':
+ local RbdDetailsPanel(title, formatY1, expr1, expr2, x, y, w, h) =
+ $.graphPanelSchema({},
+ title,
+ '',
+ 'null as zero',
+ false,
+ formatY1,
+ formatY1,
+ null,
+ null,
+ 0,
+ 1,
+ '$datasource')
+ .addTargets(
+ [
+ $.addTargetSchema(expr1,
+ '{{pool}} Write'),
+ $.addTargetSchema(expr2, '{{pool}} Read'),
+ ]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } };
+
+ $.dashboardSchema(
+ 'RBD Details',
+ 'Detailed Performance of RBD Images (IOPS/Throughput/Latency)',
+ 'YhCYGcuZz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags,
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.3.3'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('pool',
+ '$datasource',
+ 'label_values(pool)',
+ 1,
+ false,
+ 0,
+ '',
+ '')
+ )
+ .addTemplate(
+ $.addTemplateSchema('image',
+ '$datasource',
+ 'label_values(image)',
+ 1,
+ false,
+ 0,
+ '',
+ '')
+ )
+ .addPanels([
+ RbdDetailsPanel(
+ 'IOPS',
+ 'iops',
+        'rate(ceph_rbd_write_ops{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(),
+ 'rate(ceph_rbd_read_ops{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(),
+ 0,
+ 0,
+ 8,
+ 9
+ ),
+ RbdDetailsPanel(
+ 'Throughput',
+ 'Bps',
+ 'rate(ceph_rbd_write_bytes{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(),
+ 'rate(ceph_rbd_read_bytes{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(),
+ 8,
+ 0,
+ 8,
+ 9
+ ),
+ RbdDetailsPanel(
+ 'Average Latency',
+ 'ns',
+ |||
+ rate(ceph_rbd_write_latency_sum{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval]) /
+ rate(ceph_rbd_write_latency_count{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])
+ ||| % $.matchers(),
+ |||
+ rate(ceph_rbd_read_latency_sum{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval]) /
+ rate(ceph_rbd_read_latency_count{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])
+ ||| % $.matchers(),
+ 16,
+ 0,
+ 8,
+ 9
+ ),
+ ]),
+ 'rbd-overview.json':
+ local RbdOverviewPanel(title,
+ formatY1,
+ expr1,
+ expr2,
+ legendFormat1,
+ legendFormat2,
+ x,
+ y,
+ w,
+ h) =
+ $.graphPanelSchema({},
+ title,
+ '',
+ 'null',
+ false,
+ formatY1,
+ 'short',
+ null,
+ null,
+ 0,
+ 1,
+ '$datasource')
+ .addTargets(
+ [
+ $.addTargetSchema(expr1,
+ legendFormat1),
+ $.addTargetSchema(expr2,
+ legendFormat2),
+ ]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } };
+
+ $.dashboardSchema(
+ 'RBD Overview',
+ '',
+ '41FrpeUiz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags + ['overview'],
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.4.2'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addRequired(
+ type='datasource', id='prometheus', name='Prometheus', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='table', name='Table', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addPanels([
+ RbdOverviewPanel(
+ 'IOPS',
+ 'short',
+ 'round(sum(rate(ceph_rbd_write_ops{%(matchers)s}[$__rate_interval])))' % $.matchers(),
+ 'round(sum(rate(ceph_rbd_read_ops{%(matchers)s}[$__rate_interval])))' % $.matchers(),
+ 'Writes',
+ 'Reads',
+ 0,
+ 0,
+ 8,
+ 7
+ ),
+ RbdOverviewPanel(
+ 'Throughput',
+ 'Bps',
+ 'round(sum(rate(ceph_rbd_write_bytes{%(matchers)s}[$__rate_interval])))' % $.matchers(),
+ 'round(sum(rate(ceph_rbd_read_bytes{%(matchers)s}[$__rate_interval])))' % $.matchers(),
+ 'Write',
+ 'Read',
+ 8,
+ 0,
+ 8,
+ 7
+ ),
+ RbdOverviewPanel(
+ 'Average Latency',
+ 'ns',
+ |||
+ round(
+ sum(rate(ceph_rbd_write_latency_sum{%(matchers)s}[$__rate_interval])) /
+ sum(rate(ceph_rbd_write_latency_count{%(matchers)s}[$__rate_interval]))
+ )
+ ||| % $.matchers(),
+ |||
+ round(
+ sum(rate(ceph_rbd_read_latency_sum{%(matchers)s}[$__rate_interval])) /
+ sum(rate(ceph_rbd_read_latency_count{%(matchers)s}[$__rate_interval]))
+ )
+ ||| % $.matchers(),
+ 'Write',
+ 'Read',
+ 16,
+ 0,
+ 8,
+ 7
+ ),
+ $.addTableSchema(
+ '$datasource',
+ '',
+ { col: 3, desc: true },
+ [
+ $.overviewStyle('Pool', 'pool', 'string', 'short'),
+ $.overviewStyle('Image', 'image', 'string', 'short'),
+ $.overviewStyle('IOPS', 'Value', 'number', 'iops'),
+ $.overviewStyle('', '/.*/', 'hidden', 'short'),
+ ],
+ 'Highest IOPS',
+ 'table'
+ )
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ topk(10,
+ (
+ sort((
+ rate(ceph_rbd_write_ops{%(matchers)s}[$__rate_interval]) +
+ on (image, pool, namespace) rate(ceph_rbd_read_ops{%(matchers)s}[$__rate_interval])
+ ))
+ )
+ )
+ ||| % $.matchers(),
+ '',
+ 'table',
+ 1,
+ true
+ )
+ ) + { gridPos: { x: 0, y: 7, w: 8, h: 7 } },
+ $.addTableSchema(
+ '$datasource',
+ '',
+ { col: 3, desc: true },
+ [
+ $.overviewStyle('Pool', 'pool', 'string', 'short'),
+ $.overviewStyle('Image', 'image', 'string', 'short'),
+ $.overviewStyle('Throughput', 'Value', 'number', 'Bps'),
+ $.overviewStyle('', '/.*/', 'hidden', 'short'),
+ ],
+ 'Highest Throughput',
+ 'table'
+ )
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ topk(10,
+ sort(
+ sum(
+ rate(ceph_rbd_read_bytes{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_rbd_write_bytes{%(matchers)s}[$__rate_interval])
+ ) by (pool, image, namespace)
+ )
+ )
+ ||| % $.matchers(),
+ '',
+ 'table',
+ 1,
+ true
+ )
+ ) + { gridPos: { x: 8, y: 7, w: 8, h: 7 } },
+ $.addTableSchema(
+ '$datasource',
+ '',
+ { col: 3, desc: true },
+ [
+ $.overviewStyle('Pool', 'pool', 'string', 'short'),
+ $.overviewStyle('Image', 'image', 'string', 'short'),
+ $.overviewStyle('Latency', 'Value', 'number', 'ns'),
+ $.overviewStyle('', '/.*/', 'hidden', 'short'),
+ ],
+ 'Highest Latency',
+ 'table'
+ )
+ .addTarget(
+ $.addTargetSchema(
+ |||
+ topk(10,
+ sum(
+ rate(ceph_rbd_write_latency_sum{%(matchers)s}[$__rate_interval]) /
+ clamp_min(rate(ceph_rbd_write_latency_count{%(matchers)s}[$__rate_interval]), 1) +
+ rate(ceph_rbd_read_latency_sum{%(matchers)s}[$__rate_interval]) /
+ clamp_min(rate(ceph_rbd_read_latency_count{%(matchers)s}[$__rate_interval]), 1)
+ ) by (pool, image, namespace)
+ )
+ ||| % $.matchers(),
+ '',
+ 'table',
+ 1,
+ true
+ )
+ ) + { gridPos: { x: 16, y: 7, w: 8, h: 7 } },
+ ]),
+}
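
In the 'Highest Latency' table above, each image's latency is computed as rate(*_latency_sum) / clamp_min(rate(*_latency_count), 1): clamp_min pins the denominator to at least 1, so an image with no I/O during the interval yields a latency of 0 rather than a 0/0 division, which would produce NaN and drop the series from the topk result.
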
diff --git a/monitoring/ceph-mixin/dashboards/rgw.libsonnet b/monitoring/ceph-mixin/dashboards/rgw.libsonnet
new file mode 100644
index 000000000..892480d1c
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/rgw.libsonnet
@@ -0,0 +1,872 @@
+local g = import 'grafonnet/grafana.libsonnet';
+local u = import 'utils.libsonnet';
+
+(import 'utils.libsonnet') {
+ 'radosgw-sync-overview.json':
+ local RgwSyncOverviewPanel(title, formatY1, labelY1, rgwMetric, x, y, w, h) =
+ $.graphPanelSchema({},
+ title,
+ '',
+ 'null as zero',
+ true,
+ formatY1,
+ 'short',
+ labelY1,
+ null,
+ 0,
+ 1,
+ '$datasource')
+ .addTargets(
+ [
+ $.addTargetSchema(
+ 'sum by (source_zone) (rate(%(rgwMetric)s{%(matchers)s}[$__rate_interval]))'
+ % ($.matchers() + { rgwMetric: rgwMetric }),
+ '{{source_zone}}'
+ ),
+ ]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } };
+
+ $.dashboardSchema(
+ 'RGW Sync Overview',
+ '',
+ 'rgw-sync-overview',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags + ['overview'],
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema(
+ 'rgw_servers',
+ '$datasource',
+ 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ 'RGW Server',
+ ''
+ )
+ )
+ .addPanels([
+ RgwSyncOverviewPanel(
+ 'Replication (throughput) from Source Zone',
+ 'Bps',
+ null,
+ 'ceph_data_sync_from_zone_fetch_bytes_sum',
+ 0,
+ 0,
+ 8,
+ 7
+ ),
+ RgwSyncOverviewPanel(
+ 'Replication (objects) from Source Zone',
+ 'short',
+ 'Objects/s',
+ 'ceph_data_sync_from_zone_fetch_bytes_count',
+ 8,
+ 0,
+ 8,
+ 7
+ ),
+ RgwSyncOverviewPanel(
+ 'Polling Request Latency from Source Zone',
+ 'ms',
+ null,
+ 'ceph_data_sync_from_zone_poll_latency_sum',
+ 16,
+ 0,
+ 8,
+ 7
+ ),
+ RgwSyncOverviewPanel(
+ 'Unsuccessful Object Replications from Source Zone',
+ 'short',
+ 'Count/s',
+ 'ceph_data_sync_from_zone_fetch_errors',
+ 0,
+ 7,
+ 8,
+ 7
+ ),
+ ]),
+ 'radosgw-overview.json':
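+ // Shared single-target panel builder for this dashboard. The legend_* flags
+ // stay off for the RGW panels and are only switched on for the HAProxy
+ // panels further down, which show a table-style legend with
+ // min/avg/max/current values.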
+ local RgwOverviewPanel(
+ title,
+ description,
+ formatY1,
+ formatY2,
+ expr1,
+ legendFormat1,
+ x,
+ y,
+ w,
+ h,
+ datasource='$datasource',
+ legend_alignAsTable=false,
+ legend_avg=false,
+ legend_min=false,
+ legend_max=false,
+ legend_current=false,
+ legend_values=false
+ ) =
+ $.graphPanelSchema(
+ {},
+ title,
+ description,
+ 'null',
+ false,
+ formatY1,
+ formatY2,
+ null,
+ null,
+ 0,
+ 1,
+ datasource,
+ legend_alignAsTable,
+ legend_avg,
+ legend_min,
+ legend_max,
+ legend_current,
+ legend_values
+ )
+ .addTargets(
+ [$.addTargetSchema(expr1, legendFormat1)]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } };
+
+ $.dashboardSchema(
+ 'RGW Overview',
+ '',
+ 'WAkugZpiz',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags + ['overview'],
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.0.0'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource',
+ 'prometheus',
+ 'default',
+ label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema(
+ 'rgw_servers',
+ '$datasource',
+ 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ 'RGW Server',
+ ''
+ )
+ )
+ .addTemplate(
+ $.addTemplateSchema(
+ 'code',
+ '$datasource',
+ 'label_values(haproxy_server_http_responses_total{job=~"$job_haproxy", instance=~"$ingress_service"}, code)',
+ 1,
+ true,
+ 1,
+ 'HTTP Code',
+ ''
+ )
+ )
+ .addTemplate(
+ $.addTemplateSchema(
+ 'job_haproxy',
+ '$datasource',
+ 'label_values(haproxy_server_status, job)',
+ 1,
+ true,
+ 1,
+ 'job haproxy',
+ '(.*)',
+ multi=true,
+ allValues='.+',
+ ),
+ )
+ .addTemplate(
+ $.addTemplateSchema(
+ 'ingress_service',
+ '$datasource',
+ 'label_values(haproxy_server_status{job=~"$job_haproxy"}, instance)',
+ 1,
+ true,
+ 1,
+ 'Ingress Service',
+ ''
+ )
+ )
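+ // The HAProxy variables chain together: $job_haproxy scopes
+ // $ingress_service, and both scope the $code values used by the
+ // "Total responses by HTTP code" panel.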
+ .addPanels([
+ $.addRowSchema(false,
+ true,
+ 'RGW Overview - All Gateways') +
+ {
+ gridPos: { x: 0, y: 0, w: 24, h: 1 },
+ },
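+ // Every per-instance RGW expression joins on instance_id against
+ // ceph_rgw_metadata, then label_replace() turns the ceph_daemon value
+ // ("rgw.<rest>") into an rgw_host label for the legend.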
+ RgwOverviewPanel(
+ 'Average GET/PUT Latencies by RGW Instance',
+ '',
+ 's',
+ 'short',
+ |||
+ label_replace(
+ rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
+ rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
+ "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
+ )
+ ||| % $.matchers(),
+ 'GET {{rgw_host}}',
+ 0,
+ 1,
+ 8,
+ 7
+ ).addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ label_replace(
+ rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
+ rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
+ "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
+ )
+ ||| % $.matchers(),
+ 'PUT {{rgw_host}}'
+ ),
+ ]
+ ),
+ RgwOverviewPanel(
+ 'Total Requests/sec by RGW Instance',
+ '',
+ 'none',
+ 'short',
+ |||
+ sum by (rgw_host) (
+ label_replace(
+ rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
+ "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
+ )
+ )
+ ||| % $.matchers(),
+ '{{rgw_host}}',
+ 8,
+ 1,
+ 7,
+ 7
+ ),
+ RgwOverviewPanel(
+ 'GET Latencies by RGW Instance',
+ 'Latencies are shown stacked, without a y-axis, to provide a visual indication of GET latency imbalance across RGW hosts',
+ 's',
+ 'short',
+ |||
+ label_replace(
+ rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
+ rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
+ "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
+ )
+ ||| % $.matchers(),
+ '{{rgw_host}}',
+ 15,
+ 1,
+ 6,
+ 7
+ ),
+ RgwOverviewPanel(
+ 'Bandwidth Consumed by Type',
+ 'Total bytes transferred in/out of all radosgw instances within the cluster',
+ 'bytes',
+ 'short',
+ 'sum(rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]))' % $.matchers(),
+ 'GETs',
+ 0,
+ 8,
+ 8,
+ 6
+ ).addTargets(
+ [$.addTargetSchema('sum(rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval]))' % $.matchers(),
+ 'PUTs')]
+ ),
+ RgwOverviewPanel(
+ 'Bandwidth by RGW Instance',
+ 'Total bytes transferred in/out through get/put operations, by radosgw instance',
+ 'bytes',
+ 'short',
+ |||
+ label_replace(sum by (instance_id) (
+ rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval])) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
+ "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
+ )
+ ||| % $.matchers(),
+ '{{rgw_host}}',
+ 8,
+ 8,
+ 7,
+ 6
+ ),
+ RgwOverviewPanel(
+ 'PUT Latencies by RGW Instance',
+ 'Latencies are shown stacked, without a y-axis, to provide a visual indication of PUT latency imbalance across RGW hosts',
+ 's',
+ 'short',
+ |||
+ label_replace(
+ rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
+ rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
+ "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
+ )
+ ||| % $.matchers(),
+ '{{rgw_host}}',
+ 15,
+ 8,
+ 6,
+ 6
+ ),
+ $.addRowSchema(
+ false, true, 'RGW Overview - HAProxy Metrics'
+ ) + { gridPos: { x: 0, y: 12, w: 9, h: 12 } },
+ RgwOverviewPanel(
+ 'Total responses by HTTP code',
+ '',
+ 'short',
+ 'short',
+ |||
+ sum(
+ rate(
+ haproxy_frontend_http_responses_total{code=~"$code", job=~"$job_haproxy", instance=~"$ingress_service", proxy=~"frontend"}[$__rate_interval]
+ )
+ ) by (code)
+ |||,
+ 'Frontend {{ code }}',
+ 0,
+ 12,
+ 5,
+ 12,
+ '$datasource',
+ true,
+ true,
+ true,
+ true,
+ true,
+ true
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_http_responses_total{code=~"$code", job=~"$job_haproxy", instance=~"$ingress_service", proxy=~"backend"}[$__rate_interval]
+ )
+ ) by (code)
+ |||, 'Backend {{ code }}'
+ ),
+ ]
+ )
+ .addSeriesOverride([
+ {
+ alias: '/.*Back.*/',
+ transform: 'negative-Y',
+ },
+ { alias: '/.*1.*/' },
+ { alias: '/.*2.*/' },
+ { alias: '/.*3.*/' },
+ { alias: '/.*4.*/' },
+ { alias: '/.*5.*/' },
+ { alias: '/.*other.*/' },
+ ]),
+ RgwOverviewPanel(
+ 'Total requests / responses',
+ '',
+ 'short',
+ 'short',
+ |||
+ sum(
+ rate(
+ haproxy_frontend_http_requests_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||,
+ 'Requests',
+ 5,
+ 12,
+ 5,
+ 12,
+ '$datasource',
+ true,
+ true,
+ true,
+ true,
+ true,
+ true
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_response_errors_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Response errors', 'time_series', 2
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_frontend_request_errors_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Request errors'
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_redispatch_warnings_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Backend redispatch', 'time_series', 2
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_retry_warnings_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Backend retry', 'time_series', 2
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_frontend_requests_denied_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Request denied', 'time_series', 2
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ haproxy_backend_current_queue{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}
+ ) by (instance)
+ |||, 'Backend Queued', 'time_series', 2
+ ),
+ ]
+ )
+ .addSeriesOverride([
+ {
+ alias: '/.*Response.*/',
+ transform: 'negative-Y',
+ },
+ {
+ alias: '/.*Backend.*/',
+ transform: 'negative-Y',
+ },
+ ]),
+ RgwOverviewPanel(
+ 'Total number of connections',
+ '',
+ 'short',
+ 'short',
+ |||
+ sum(
+ rate(
+ haproxy_frontend_connections_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||,
+ 'Front',
+ 10,
+ 12,
+ 5,
+ 12,
+ '$datasource',
+ true,
+ true,
+ true,
+ true,
+ true,
+ true
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_connection_attempts_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Back'
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_connection_errors_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ )
+ ) by (instance)
+ |||, 'Back errors'
+ ),
+ ]
+ )
+ .addSeriesOverride([
+ {
+ alias: '/.*Back.*/',
+ transform: 'negative-Y',
+ },
+ ]),
+ RgwOverviewPanel(
+ 'Current total of incoming / outgoing bytes',
+ '',
+ 'short',
+ 'short',
+ |||
+ sum(
+ rate(
+ haproxy_frontend_bytes_in_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ ) * 8
+ ) by (instance)
+ |||,
+ 'IN Front',
+ 15,
+ 12,
+ 6,
+ 12,
+ '$datasource',
+ true,
+ true,
+ true,
+ true,
+ true,
+ true
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_frontend_bytes_out_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ ) * 8
+ ) by (instance)
+ |||, 'OUT Front', 'time_series', 2
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_bytes_in_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ ) * 8
+ ) by (instance)
+ |||, 'IN Back', 'time_series', 2
+ ),
+ $.addTargetSchema(
+ |||
+ sum(
+ rate(
+ haproxy_backend_bytes_out_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
+ ) * 8
+ ) by (instance)
+ |||, 'OUT Back', 'time_series', 2
+ ),
+ ]
+ )
+ .addSeriesOverride([
+ {
+ alias: '/.*OUT.*/',
+ transform: 'negative-Y',
+ },
+ ]),
+ ]),
+ 'radosgw-detail.json':
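+ // Two-target panel builder for the per-instance dashboard; all of its
+ // expressions are filtered to the daemon(s) picked by $rgw_servers via the
+ // ceph_rgw_metadata join on instance_id.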
+ local RgwDetailsPanel(aliasColors,
+ title,
+ description,
+ formatY1,
+ formatY2,
+ expr1,
+ expr2,
+ legendFormat1,
+ legendFormat2,
+ x,
+ y,
+ w,
+ h) =
+ $.graphPanelSchema(aliasColors,
+ title,
+ description,
+ 'null',
+ false,
+ formatY1,
+ formatY2,
+ null,
+ null,
+ 0,
+ 1,
+ '$datasource')
+ .addTargets(
+ [$.addTargetSchema(expr1, legendFormat1), $.addTargetSchema(expr2, legendFormat2)]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } };
+
+ $.dashboardSchema(
+ 'RGW Instance Detail',
+ '',
+ 'x5ARzZtmk',
+ 'now-1h',
+ '30s',
+ 16,
+ $._config.dashboardTags + ['overview'],
+ ''
+ )
+ .addAnnotation(
+ $.addAnnotationSchema(
+ 1,
+ '-- Grafana --',
+ true,
+ true,
+ 'rgba(0, 211, 255, 1)',
+ 'Annotations & Alerts',
+ 'dashboard'
+ )
+ )
+ .addRequired(
+ type='grafana', id='grafana', name='Grafana', version='5.0.0'
+ )
+ .addRequired(
+ type='panel',
+ id='grafana-piechart-panel',
+ name='Pie Chart',
+ version='1.3.3'
+ )
+ .addRequired(
+ type='panel', id='graph', name='Graph', version='5.0.0'
+ )
+ .addTemplate(
+ g.template.datasource('datasource',
+ 'prometheus',
+ 'default',
+ label='Data Source')
+ )
+ .addTemplate(
+ $.addClusterTemplate()
+ )
+ .addTemplate(
+ $.addJobTemplate()
+ )
+ .addTemplate(
+ $.addTemplateSchema('rgw_servers',
+ '$datasource',
+ 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ '',
+ '')
+ )
+ .addPanels([
+ $.addRowSchema(false, true, 'RGW Host Detail : $rgw_servers') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } },
+ RgwDetailsPanel(
+ {},
+ '$rgw_servers GET/PUT Latencies',
+ '',
+ 's',
+ 'short',
+ |||
+ sum by (instance_id) (
+ rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
+ rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval])
+ ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ |||
+ sum by (instance_id) (
+ rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
+ rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval])
+ ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'GET {{ceph_daemon}}',
+ 'PUT {{ceph_daemon}}',
+ 0,
+ 1,
+ 6,
+ 8
+ ),
+ RgwDetailsPanel(
+ {},
+ 'Bandwidth by HTTP Operation',
+ '',
+ 'bytes',
+ 'short',
+ |||
+ rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ |||
+ rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon)
+ ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'GETs {{ceph_daemon}}',
+ 'PUTs {{ceph_daemon}}',
+ 6,
+ 1,
+ 7,
+ 8
+ ),
+ RgwDetailsPanel(
+ {
+ GETs: '#7eb26d',
+ Other: '#447ebc',
+ PUTs: '#eab839',
+ Requests: '#3f2b5b',
+ 'Requests Failed': '#bf1b00',
+ },
+ 'HTTP Request Breakdown',
+ '',
+ 'short',
+ 'short',
+ |||
+ rate(ceph_rgw_failed_req{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s,ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ |||
+ rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'Requests Failed {{ceph_daemon}}',
+ 'GETs {{ceph_daemon}}',
+ 13,
+ 1,
+ 7,
+ 8
+ )
+ .addTargets(
+ [
+ $.addTargetSchema(
+ |||
+ rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'PUTs {{ceph_daemon}}'
+ ),
+ $.addTargetSchema(
+ |||
+ (
+ rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) -
+ (
+ rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_rgw_put{%(matchers)s}[$__rate_interval])
+ )
+ ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'Other {{ceph_daemon}}'
+ ),
+ ]
+ ),
+ $.simplePieChart(
+ {
+ GETs: '#7eb26d',
+ 'Other (HEAD,POST,DELETE)': '#447ebc',
+ PUTs: '#eab839',
+ Requests: '#3f2b5b',
+ Failures: '#bf1b00',
+ }, '', 'Workload Breakdown'
+ )
+ .addTarget($.addTargetSchema(
+ |||
+ rate(ceph_rgw_failed_req{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'Failures {{ceph_daemon}}'
+ ))
+ .addTarget($.addTargetSchema(
+ |||
+ rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'GETs {{ceph_daemon}}'
+ ))
+ .addTarget($.addTargetSchema(
+ |||
+ rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) *
+ on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'PUTs {{ceph_daemon}}'
+ ))
+ .addTarget($.addTargetSchema(
+ |||
+ (
+ rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) -
+ (
+ rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) +
+ rate(ceph_rgw_put{%(matchers)s}[$__rate_interval])
+ )
+ ) * on (instance_id) group_left (ceph_daemon)
+ ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
+ ||| % $.matchers(),
+ 'Other (DELETE,LIST) {{ceph_daemon}}'
+ )) + { gridPos: { x: 20, y: 1, w: 4, h: 8 } },
+ ]),
+}
diff --git a/monitoring/ceph-mixin/dashboards/utils.libsonnet b/monitoring/ceph-mixin/dashboards/utils.libsonnet
new file mode 100644
index 000000000..a7774c7ce
--- /dev/null
+++ b/monitoring/ceph-mixin/dashboards/utils.libsonnet
@@ -0,0 +1,333 @@
+local g = import 'grafonnet/grafana.libsonnet';
+
+{
+ _config:: error 'must provide _config',
+
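+ // Thin wrappers around the grafonnet builders, so the dashboard files only
+ // have to supply titles, queries and grid positions.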
+ dashboardSchema(title,
+ description,
+ uid,
+ time_from,
+ refresh,
+ schemaVersion,
+ tags,
+ timezone)::
+ g.dashboard.new(title=title,
+ description=description,
+ uid=uid,
+ time_from=time_from,
+ refresh=refresh,
+ schemaVersion=schemaVersion,
+ tags=tags,
+ timezone=timezone),
+
+ graphPanelSchema(aliasColors,
+ title,
+ description,
+ nullPointMode,
+ stack,
+ formatY1,
+ formatY2,
+ labelY1,
+ labelY2,
+ min,
+ fill,
+ datasource,
+ legend_alignAsTable=false,
+ legend_avg=false,
+ legend_min=false,
+ legend_max=false,
+ legend_current=false,
+ legend_values=false)::
+ g.graphPanel.new(aliasColors=aliasColors,
+ title=title,
+ description=description,
+ nullPointMode=nullPointMode,
+ stack=stack,
+ formatY1=formatY1,
+ formatY2=formatY2,
+ labelY1=labelY1,
+ labelY2=labelY2,
+ min=min,
+ fill=fill,
+ datasource=datasource,
+ legend_alignAsTable=legend_alignAsTable,
+ legend_avg=legend_avg,
+ legend_min=legend_min,
+ legend_max=legend_max,
+ legend_current=legend_current,
+ legend_values=legend_values),
+
+ addTargetSchema(expr, legendFormat='', format='time_series', intervalFactor=1, instant=null)::
+ g.prometheus.target(expr=expr,
+ legendFormat=legendFormat,
+ format=format,
+ intervalFactor=intervalFactor,
+ instant=instant),
+
+ addTemplateSchema(name,
+ datasource,
+ query,
+ refresh,
+ includeAll,
+ sort,
+ label,
+ regex,
+ hide='',
+ multi=false,
+ allValues=null)::
+ g.template.new(name=name,
+ datasource=datasource,
+ query=query,
+ refresh=refresh,
+ includeAll=includeAll,
+ sort=sort,
+ label=label,
+ regex=regex,
+ hide=hide,
+ multi=multi,
+ allValues=allValues),
+
+ addAnnotationSchema(builtIn,
+ datasource,
+ enable,
+ hide,
+ iconColor,
+ name,
+ type)::
+ g.annotation.datasource(builtIn=builtIn,
+ datasource=datasource,
+ enable=enable,
+ hide=hide,
+ iconColor=iconColor,
+ name=name,
+ type=type),
+
+ addRowSchema(collapse, showTitle, title)::
+ g.row.new(collapse=collapse, showTitle=showTitle, title=title),
+
+ addSingleStatSchema(colors,
+ datasource,
+ format,
+ title,
+ description,
+ valueName,
+ colorValue,
+ gaugeMaxValue,
+ gaugeShow,
+ sparklineShow,
+ thresholds)::
+ g.singlestat.new(colors=colors,
+ datasource=datasource,
+ format=format,
+ title=title,
+ description=description,
+ valueName=valueName,
+ colorValue=colorValue,
+ gaugeMaxValue=gaugeMaxValue,
+ gaugeShow=gaugeShow,
+ sparklineShow=sparklineShow,
+ thresholds=thresholds),
+
+ addPieChartSchema(aliasColors,
+ datasource,
+ description,
+ legendType,
+ pieType,
+ title,
+ valueName)::
+ g.pieChartPanel.new(aliasColors=aliasColors,
+ datasource=datasource,
+ description=description,
+ legendType=legendType,
+ pieType=pieType,
+ title=title,
+ valueName=valueName),
+
+ addTableSchema(datasource, description, sort, styles, title, transform)::
+ g.tablePanel.new(datasource=datasource,
+ description=description,
+ sort=sort,
+ styles=styles,
+ title=title,
+ transform=transform),
+
+ addStyle(alias,
+ colorMode,
+ colors,
+ dateFormat,
+ decimals,
+ mappingType,
+ pattern,
+ thresholds,
+ type,
+ unit,
+ valueMaps)::
+ {
+ alias: alias,
+ colorMode: colorMode,
+ colors: colors,
+ dateFormat: dateFormat,
+ decimals: decimals,
+ mappingType: mappingType,
+ pattern: pattern,
+ thresholds: thresholds,
+ type: type,
+ unit: unit,
+ valueMaps: valueMaps,
+ },
+
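+ // Label selectors spliced into the PromQL expressions via `% $.matchers()`.
+ // With the default clusterLabel of 'cluster' (an assumption; it is
+ // configurable) `matchers` expands to roughly
+ //   job=~"$job", cluster=~"$cluster"
+ // when showMultiCluster is set, and to just job=~"$job" otherwise.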
+ matchers()::
+ local jobMatcher = 'job=~"$job"';
+ local clusterMatcher = '%s=~"$cluster"' % $._config.clusterLabel;
+ {
+ // Common labels
+ jobMatcher: jobMatcher,
+ clusterMatcher: (if $._config.showMultiCluster then clusterMatcher else ''),
+ matchers: jobMatcher +
+ (if $._config.showMultiCluster then ', ' + clusterMatcher else ''),
+ },
+
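+ // The cluster variable is always defined but hidden (hide='variable') when
+ // showMultiCluster is off, presumably so the generated JSON keeps the same
+ // shape for single-cluster deployments.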
+ addClusterTemplate()::
+ $.addTemplateSchema(
+ 'cluster',
+ '$datasource',
+ 'label_values(ceph_osd_metadata, %s)' % $._config.clusterLabel,
+ 1,
+ true,
+ 1,
+ 'cluster',
+ '(.*)',
+ if !$._config.showMultiCluster then 'variable' else '',
+ multi=true,
+ allValues='.+',
+ ),
+
+ addJobTemplate()::
+ $.addTemplateSchema(
+ 'job',
+ '$datasource',
+ 'label_values(ceph_osd_metadata{%(clusterMatcher)s}, job)' % $.matchers(),
+ 1,
+ true,
+ 1,
+ 'job',
+ '(.*)',
+ multi=true,
+ allValues='.+',
+ ),
+
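+ // Column style used by the "Highest ..." overview tables (e.g. the RBD
+ // tables above): fixed red/orange/green palette, two decimals, and a
+ // configurable unit per column.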
+ overviewStyle(alias,
+ pattern,
+ type,
+ unit,
+ colorMode=null,
+ thresholds=[],
+ valueMaps=[])::
+ $.addStyle(alias,
+ colorMode,
+ [
+ 'rgba(245, 54, 54, 0.9)',
+ 'rgba(237, 129, 40, 0.89)',
+ 'rgba(50, 172, 45, 0.97)',
+ ],
+ 'YYYY-MM-DD HH:mm:ss',
+ 2,
+ 1,
+ pattern,
+ thresholds,
+ type,
+ unit,
+ valueMaps),
+
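+ // Convenience builders used by the simpler dashboards: a single-target
+ // graph panel plus singlestat panels with and without the gauge/sparkline
+ // options.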
+ simpleGraphPanel(alias,
+ title,
+ description,
+ formatY1,
+ labelY1,
+ min,
+ expr,
+ legendFormat,
+ x,
+ y,
+ w,
+ h)::
+ $.graphPanelSchema(alias,
+ title,
+ description,
+ 'null',
+ false,
+ formatY1,
+ 'short',
+ labelY1,
+ null,
+ min,
+ 1,
+ '$datasource')
+ .addTargets(
+ [$.addTargetSchema(expr, legendFormat)]
+ ) + { gridPos: { x: x, y: y, w: w, h: h } },
+
+ simpleSingleStatPanel(format,
+ title,
+ description,
+ valueName,
+ expr,
+ instant,
+ targetFormat,
+ x,
+ y,
+ w,
+ h)::
+ $.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'],
+ '$datasource',
+ format,
+ title,
+ description,
+ valueName,
+ false,
+ 100,
+ false,
+ false,
+ '')
+ .addTarget($.addTargetSchema(expr, '', targetFormat, 1, instant)) + {
+ gridPos: { x: x, y: y, w: w, h: h },
+ },
+
+ gaugeSingleStatPanel(format,
+ title,
+ description,
+ valueName,
+ colorValue,
+ gaugeMaxValue,
+ gaugeShow,
+ sparkLineShow,
+ thresholds,
+ expr,
+ targetFormat,
+ x,
+ y,
+ w,
+ h)::
+ $.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'],
+ '$datasource',
+ format,
+ title,
+ description,
+ valueName,
+ colorValue,
+ gaugeMaxValue,
+ gaugeShow,
+ sparkLineShow,
+ thresholds)
+ .addTarget($.addTargetSchema(expr, '', targetFormat)) +
+ { gridPos: { x: x, y: y, w: w, h: h } },
+
+ simplePieChart(alias, description, title)::
+ $.addPieChartSchema(alias,
+ '$datasource',
+ description,
+ 'Under graph',
+ 'pie',
+ title,
+ 'current'),
+}