summaryrefslogtreecommitdiffstats
path: root/src/pybind/mgr/zabbix
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
treee5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/pybind/mgr/zabbix
parentInitial commit. (diff)
downloadceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz
ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip
Adding upstream version 14.2.21.upstream/14.2.21upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/pybind/mgr/zabbix')
-rw-r--r--src/pybind/mgr/zabbix/__init__.py1
-rw-r--r--src/pybind/mgr/zabbix/module.py380
-rw-r--r--src/pybind/mgr/zabbix/zabbix_template.xml2569
3 files changed, 2950 insertions, 0 deletions
diff --git a/src/pybind/mgr/zabbix/__init__.py b/src/pybind/mgr/zabbix/__init__.py
new file mode 100644
index 00000000..8f210ac9
--- /dev/null
+++ b/src/pybind/mgr/zabbix/__init__.py
@@ -0,0 +1 @@
+from .module import Module
diff --git a/src/pybind/mgr/zabbix/module.py b/src/pybind/mgr/zabbix/module.py
new file mode 100644
index 00000000..25d2f825
--- /dev/null
+++ b/src/pybind/mgr/zabbix/module.py
@@ -0,0 +1,380 @@
+"""
+Zabbix module for ceph-mgr
+
+Collect statistics from Ceph cluster and every X seconds send data to a Zabbix
+server using the zabbix_sender executable.
+"""
+import json
+import errno
+import re
+from subprocess import Popen, PIPE
+from threading import Event
+from mgr_module import MgrModule
+
+
def avg(data):
    """Return the arithmetic mean of *data*, or 0 for an empty sequence."""
    if not data:
        return 0
    return sum(data) / float(len(data))
+
+
class ZabbixSender(object):
    """Thin wrapper around the zabbix_sender(8) executable.

    Formats metric key/value pairs and pipes them through zabbix_sender,
    which forwards them to the configured Zabbix server.
    """

    def __init__(self, sender, host, port, log):
        # sender: path to the zabbix_sender binary
        # host/port: Zabbix server to deliver to
        # log: logger used for debug output
        self.sender = sender
        self.host = host
        self.port = port
        self.log = log

    def send(self, hostname, data):
        """Send *data* (dict of metric name -> value) to Zabbix.

        Each key is sent as ``ceph.<key>`` on behalf of host *hostname*.
        No-op when *data* is empty.  Raises RuntimeError when
        zabbix_sender exits non-zero.
        """
        if len(data) == 0:
            return

        cmd = [self.sender, '-z', self.host, '-p', str(self.port), '-s',
               hostname, '-vv', '-i', '-']

        self.log.debug('Executing: %s', cmd)

        # Build the whole payload up front and hand it to communicate():
        # writing to proc.stdin directly can deadlock once the stdout/stderr
        # pipe buffers fill, and raises an unhandled BrokenPipeError if the
        # sender process exits early.  communicate() handles both cases
        # (see the deadlock warning in the subprocess documentation).
        payload = ''.join('{0} ceph.{1} {2}\n'.format(hostname, key, value)
                          for key, value in data.items()).encode('utf-8')

        proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate(input=payload)
        if proc.returncode != 0:
            raise RuntimeError('%s exited non-zero: %s' % (self.sender,
                                                           stderr))

        self.log.debug('Zabbix Sender: %s', stdout.rstrip())
+
+
class Module(MgrModule):
    """Zabbix ceph-mgr module.

    Periodically collects cluster statistics (health, mon/osd/pg counts,
    pool I/O, OSD utilisation and latency) and ships them to one or more
    Zabbix servers through the zabbix_sender executable.
    """

    run = False
    config = dict()
    # Map Ceph health strings to the integer codes used by the Zabbix
    # template shipped alongside this module.
    ceph_health_mapping = {'HEALTH_OK': 0, 'HEALTH_WARN': 1, 'HEALTH_ERR': 2}
    # Parsed list of {'zabbix_host': ..., 'zabbix_port': ...} dicts,
    # populated by _parse_zabbix_hosts().
    _zabbix_hosts = list()

    @property
    def config_keys(self):
        """Return a dict of option name -> default value from MODULE_OPTIONS."""
        return dict((o['name'], o.get('default', None))
                    for o in self.MODULE_OPTIONS)

    MODULE_OPTIONS = [
        {
            'name': 'zabbix_sender',
            'default': '/usr/bin/zabbix_sender'
        },
        {
            'name': 'zabbix_host',
            'default': None
        },
        {
            'name': 'zabbix_port',
            'type': 'int',
            'default': 10051
        },
        {
            'name': 'identifier',
            'default': ""
        },
        {
            'name': 'interval',
            'type': 'secs',
            'default': 60
        }
    ]

    COMMANDS = [
        {
            "cmd": "zabbix config-set name=key,type=CephString "
                   "name=value,type=CephString",
            "desc": "Set a configuration value",
            "perm": "rw"
        },
        {
            "cmd": "zabbix config-show",
            "desc": "Show current configuration",
            "perm": "r"
        },
        {
            "cmd": "zabbix send",
            "desc": "Force sending data to Zabbix",
            "perm": "rw"
        },
    ]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)
        # Used by serve() to sleep between iterations and by shutdown()
        # to wake the thread up immediately.
        self.event = Event()

    def init_module_config(self):
        """Load persisted module options into self.config and parse hosts."""
        self.fsid = self.get('mon_map')['fsid']
        self.log.debug('Found Ceph fsid %s', self.fsid)

        for key, default in self.config_keys.items():
            self.set_config_option(key, self.get_module_option(key, default))

        if self.config['zabbix_host']:
            self._parse_zabbix_hosts()

    def set_config_option(self, option, value):
        """Validate *value* and store it in the in-memory config.

        Raises RuntimeError for unknown options, non-integer values for
        integer options, or an interval below 10 seconds.  Returns True
        on success.
        """
        if option not in self.config_keys.keys():
            raise RuntimeError('{0} is a unknown configuration '
                               'option'.format(option))

        if option in ['zabbix_port', 'interval']:
            try:
                value = int(value)
            except (ValueError, TypeError):
                raise RuntimeError('invalid {0} configured. Please specify '
                                   'a valid integer'.format(option))

        if option == 'interval' and value < 10:
            raise RuntimeError('interval should be set to at least 10 seconds')

        self.log.debug('Setting in-memory config option %s to: %s', option,
                       value)
        self.config[option] = value
        return True

    def _parse_zabbix_hosts(self):
        """Parse the comma-separated zabbix_host option into _zabbix_hosts.

        Accepts hostnames, IPv4, or (optionally bracketed) IPv6 addresses,
        each with an optional ":port" suffix; when no port is given the
        configured zabbix_port is used.  Invalid entries are logged and
        skipped.
        """
        self._zabbix_hosts = list()
        servers = self.config['zabbix_host'].split(",")
        for server in servers:
            # Raw string literal: the pattern relies on backslash escapes
            # (\[ and \.) which are invalid escape sequences in a normal
            # string literal and would break on future Python versions.
            uri = re.match(r"(?:(?:\[?)([a-z0-9-\.]+|[a-f0-9:\.]+)(?:\]?))(?:((?::))([0-9]{1,5}))?$", server)
            if uri:
                zabbix_host, sep, zabbix_port = uri.groups()
                zabbix_port = zabbix_port if sep == ':' else self.config['zabbix_port']
                self._zabbix_hosts.append({'zabbix_host': zabbix_host, 'zabbix_port': zabbix_port})
            else:
                self.log.error('Zabbix host "%s" is not valid', server)

        # Informational only; previously logged at ERROR level by mistake.
        self.log.debug('Parsed Zabbix hosts: %s', self._zabbix_hosts)

    def get_pg_stats(self):
        """Return a dict with the total PG count and per-state PG counts."""
        stats = dict()

        pg_states = ['active', 'peering', 'clean', 'scrubbing', 'undersized',
                     'backfilling', 'recovering', 'degraded', 'inconsistent',
                     'remapped', 'backfill_toofull', 'backfill_wait',
                     'recovery_wait']

        # Initialize every tracked state to 0 so keys are always present.
        for state in pg_states:
            stats['num_pg_{0}'.format(state)] = 0

        pg_status = self.get('pg_status')

        stats['num_pg'] = pg_status['num_pgs']

        # A PG reports a combined state like "active+clean"; count the PG
        # once for every tracked state it is in.
        for state in pg_status['pgs_by_state']:
            states = state['state_name'].split('+')
            for s in pg_states:
                key = 'num_pg_{0}'.format(s)
                if s in states:
                    stats[key] += state['count']

        return stats

    def get_data(self):
        """Collect all statistics to be sent to Zabbix as a flat dict."""
        data = dict()

        health = json.loads(self.get('health')['json'])
        # 'status' is luminous+, 'overall_status' is legacy mode.
        data['overall_status'] = health.get('status',
                                            health.get('overall_status'))
        data['overall_status_int'] = \
            self.ceph_health_mapping.get(data['overall_status'])

        mon_status = json.loads(self.get('mon_status')['json'])
        data['num_mon'] = len(mon_status['monmap']['mons'])

        df = self.get('df')
        data['num_pools'] = len(df['pools'])
        data['total_used_bytes'] = df['stats']['total_used_bytes']
        data['total_bytes'] = df['stats']['total_bytes']
        data['total_avail_bytes'] = df['stats']['total_avail_bytes']

        # Cluster-wide I/O totals, summed over all pools.
        wr_ops = 0
        rd_ops = 0
        wr_bytes = 0
        rd_bytes = 0

        for pool in df['pools']:
            wr_ops += pool['stats']['wr']
            rd_ops += pool['stats']['rd']
            wr_bytes += pool['stats']['wr_bytes']
            rd_bytes += pool['stats']['rd_bytes']

        data['wr_ops'] = wr_ops
        data['rd_ops'] = rd_ops
        data['wr_bytes'] = wr_bytes
        data['rd_bytes'] = rd_bytes

        osd_map = self.get('osd_map')
        data['num_osd'] = len(osd_map['osds'])
        data['osd_nearfull_ratio'] = osd_map['nearfull_ratio']
        data['osd_full_ratio'] = osd_map['full_ratio']
        data['osd_backfillfull_ratio'] = osd_map['backfillfull_ratio']

        data['num_pg_temp'] = len(osd_map['pg_temp'])

        num_up = 0
        num_in = 0
        for osd in osd_map['osds']:
            if osd['up'] == 1:
                num_up += 1

            if osd['in'] == 1:
                num_in += 1

        data['num_osd_up'] = num_up
        data['num_osd_in'] = num_in

        osd_fill = list()
        osd_pgs = list()
        osd_apply_latency_ns = list()
        osd_commit_latency_ns = list()

        osd_stats = self.get('osd_stats')
        for osd in osd_stats['osd_stats']:
            # Skip OSDs that report no capacity (avoids division by zero).
            if osd['kb'] == 0:
                continue
            osd_fill.append((float(osd['kb_used']) / float(osd['kb'])) * 100)
            osd_pgs.append(osd['num_pgs'])
            osd_apply_latency_ns.append(osd['perf_stat']['apply_latency_ns'])
            osd_commit_latency_ns.append(osd['perf_stat']['commit_latency_ns'])

        # max()/min() raise ValueError on empty sequences (no usable OSDs);
        # in that case the keys are simply omitted.
        try:
            data['osd_max_fill'] = max(osd_fill)
            data['osd_min_fill'] = min(osd_fill)
            data['osd_avg_fill'] = avg(osd_fill)
            data['osd_max_pgs'] = max(osd_pgs)
            data['osd_min_pgs'] = min(osd_pgs)
            data['osd_avg_pgs'] = avg(osd_pgs)
        except ValueError:
            pass

        try:
            data['osd_latency_apply_max'] = max(osd_apply_latency_ns) / 1000000.0   # ns -> ms
            data['osd_latency_apply_min'] = min(osd_apply_latency_ns) / 1000000.0   # ns -> ms
            data['osd_latency_apply_avg'] = avg(osd_apply_latency_ns) / 1000000.0   # ns -> ms

            data['osd_latency_commit_max'] = max(osd_commit_latency_ns) / 1000000.0  # ns -> ms
            data['osd_latency_commit_min'] = min(osd_commit_latency_ns) / 1000000.0  # ns -> ms
            data['osd_latency_commit_avg'] = avg(osd_commit_latency_ns) / 1000000.0  # ns -> ms
        except ValueError:
            pass

        data.update(self.get_pg_stats())

        return data

    def send(self):
        """Collect statistics and send them to every configured server.

        Returns True when all servers were updated successfully, False on
        any failure, and None when no server is configured (a health
        warning is raised in that case).
        """
        data = self.get_data()

        identifier = self.config['identifier']
        if identifier is None or len(identifier) == 0:
            identifier = 'ceph-{0}'.format(self.fsid)

        if not self.config['zabbix_host'] or not self._zabbix_hosts:
            self.log.error('Zabbix server not set, please configure using: '
                           'ceph zabbix config-set zabbix_host <zabbix_host>')
            self.set_health_checks({
                'MGR_ZABBIX_NO_SERVER': {
                    'severity': 'warning',
                    'summary': 'No Zabbix server configured',
                    'detail': ['Configuration value zabbix_host not configured']
                }
            })
            return

        result = True

        for server in self._zabbix_hosts:
            self.log.info(
                'Sending data to Zabbix server %s, port %s as host/identifier %s',
                server['zabbix_host'], server['zabbix_port'], identifier)
            self.log.debug(data)

            try:
                zabbix = ZabbixSender(self.config['zabbix_sender'],
                                      server['zabbix_host'],
                                      server['zabbix_port'], self.log)
                zabbix.send(identifier, data)
            except Exception as exc:
                self.log.exception('Failed to send.')
                self.set_health_checks({
                    'MGR_ZABBIX_SEND_FAILED': {
                        'severity': 'warning',
                        'summary': 'Failed to send data to Zabbix',
                        'detail': [str(exc)]
                    }
                })
                result = False

        # Only clear health checks when every server succeeded; clearing
        # unconditionally would immediately discard the
        # MGR_ZABBIX_SEND_FAILED warning raised above.
        if result:
            self.set_health_checks(dict())
        return result

    def handle_command(self, inbuf, command):
        """Dispatch the 'zabbix ...' CLI commands declared in COMMANDS.

        Returns the (retval, stdout, stderr) tuple expected by ceph-mgr.
        """
        if command['prefix'] == 'zabbix config-show':
            return 0, json.dumps(self.config), ''
        elif command['prefix'] == 'zabbix config-set':
            key = command['key']
            value = command['value']
            if not value:
                return -errno.EINVAL, '', 'Value should not be empty or None'

            self.log.debug('Setting configuration option %s to %s', key, value)
            if self.set_config_option(key, value):
                # Persist the option and re-parse the server list when the
                # target host/port changed.
                self.set_module_option(key, value)
                if key == 'zabbix_host' or key == 'zabbix_port':
                    self._parse_zabbix_hosts()
                return 0, 'Configuration option {0} updated'.format(key), ''

            return 1,\
                'Failed to update configuration option {0}'.format(key), ''

        elif command['prefix'] == 'zabbix send':
            if self.send():
                return 0, 'Sending data to Zabbix', ''

            return 1, 'Failed to send data to Zabbix', ''
        else:
            return (-errno.EINVAL, '',
                    "Command not found '{0}'".format(command['prefix']))

    def shutdown(self):
        """Stop the serve() loop and wake it up immediately."""
        self.log.info('Stopping zabbix')
        self.run = False
        self.event.set()

    def serve(self):
        """Main loop: send statistics every 'interval' seconds until shutdown."""
        self.log.info('Zabbix module starting up')
        self.run = True

        self.init_module_config()

        while self.run:
            self.log.debug('Waking up for new iteration')

            try:
                self.send()
            except Exception:
                # Shouldn't happen, but let's log it and retry next interval,
                # rather than dying completely.
                self.log.exception("Unexpected error during send():")

            interval = self.config['interval']
            self.log.debug('Sleeping for %d seconds', interval)
            self.event.wait(interval)

    def self_test(self):
        """Sanity-check collected data; raises RuntimeError on bad values."""
        data = self.get_data()

        if data['overall_status'] not in self.ceph_health_mapping:
            raise RuntimeError('No valid overall_status found in data')

        int(data['overall_status_int'])

        if data['num_mon'] < 1:
            raise RuntimeError('num_mon is smaller than 1')
diff --git a/src/pybind/mgr/zabbix/zabbix_template.xml b/src/pybind/mgr/zabbix/zabbix_template.xml
new file mode 100644
index 00000000..d75d912e
--- /dev/null
+++ b/src/pybind/mgr/zabbix/zabbix_template.xml
@@ -0,0 +1,2569 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<zabbix_export>
+ <version>3.0</version>
+ <date>2017-10-24T07:00:13Z</date>
+ <groups>
+ <group>
+ <name>Templates</name>
+ </group>
+ </groups>
+ <templates>
+ <template>
+ <template>ceph-mgr Zabbix module</template>
+ <name>ceph-mgr Zabbix module</name>
+ <description/>
+ <groups>
+ <group>
+ <name>Templates</name>
+ </group>
+ </groups>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <items>
+ <item>
+ <name>Number of Monitors</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_mon</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Number of Monitors configured in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of OSDs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_osd</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Number of OSDs in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of OSDs in state: IN</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_osd_in</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of IN OSDs in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of OSDs in state: UP</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_osd_up</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of UP OSDs in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Temporary state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_temp</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in pg_temp state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Active state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_active</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in active state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Clean state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_clean</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in clean state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Peering state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_peering</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in peering state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Scrubbing state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_scrubbing</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in scrubbing state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Undersized state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_undersized</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in undersized state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Backfilling state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_backfilling</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in backfilling state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in degraded state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_degraded</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in degraded state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in inconsistent state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_inconsistent</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in inconsistent state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in remapped state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_remapped</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in remapped state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in recovering state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_recovering</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in recovering state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in backfill_toofull state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_backfill_toofull</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in backfill_toofull state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in backfill_wait state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_backfill_wait</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in backfill_wait state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in recovery_wait state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_recovery_wait</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in recovery_wait state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Pools</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pools</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of pools in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD avg fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_avg_fill</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average fill of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD max PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_max_pgs</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Maximum amount of PGs on OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD min PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_min_pgs</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Minimum amount of PGs on OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD avg PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_avg_pgs</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average amount of PGs on OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph backfill full ratio</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>1</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_backfillfull_ratio</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>100</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Backfill full ratio setting of Ceph cluster as configured on OSDMap</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph full ratio</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>1</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_full_ratio</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>100</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Full ratio setting of Ceph cluster as configured on OSDMap</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Apply latency Avg</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_apply_avg</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average apply latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Apply latency Max</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_apply_max</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Maximum apply latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Apply latency Min</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_apply_min</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Minimum apply latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Commit latency Avg</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_commit_avg</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average commit latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Commit latency Max</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_commit_max</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Maximum commit latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Commit latency Min</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_commit_min</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Minimum commit latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD max fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_max_fill</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Percentage fill of maximum filled OSD</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD min fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_min_fill</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Percentage fill of minimum filled OSD</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph nearfull ratio</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>1</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_nearfull_ratio</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>100</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Near full ratio setting of Ceph cluster as configured on OSDMap</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Overall Ceph status</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.overall_status</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>0</trends>
+ <status>0</status>
+ <value_type>4</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Overall Ceph cluster status, e.g. HEALTH_OK, HEALTH_WARN or HEALTH_ERR</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Overall Ceph status (numeric)</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.overall_status_int</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Overall Ceph status in numeric value. OK: 0, WARN: 1, ERR: 2</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Read bandwidth</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.rd_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>b</units>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global read bandwidth</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Read operations</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.rd_ops</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global read operations per second</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total bytes available</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_avail_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>B</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total bytes available in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total bytes</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>B</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total (RAW) capacity of Ceph cluster in bytes</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total number of objects</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_objects</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of objects in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total bytes used</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_used_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>B</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total bytes used in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Write bandwidth</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.wr_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>b</units>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global write bandwidth</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Write operations</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.wr_ops</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global write operations per second</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ </items>
+ <discovery_rules/>
+ <macros/>
+ <templates/>
+ <screens>
+ <screen>
+ <name>Ceph</name>
+ <hsize>1</hsize>
+ <vsize>7</vsize>
+ <screen_items>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>500</width>
+ <height>100</height>
+ <x>0</x>
+ <y>0</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph storage overview</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>1</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph free space</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>2</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph health</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>3</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph bandwidth</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>4</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph I/O</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>5</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph OSD utilization</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>6</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph OSD latency</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ </screen_items>
+ </screen>
+ </screens>
+ </template>
+ </templates>
+ <triggers>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.overall_status_int.last()}=2</expression>
+ <name>Ceph cluster in ERR state</name>
+ <url/>
+ <status>0</status>
+ <priority>5</priority>
+ <description>Ceph cluster is in ERR state</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.overall_status_int.avg(1h)}=1</expression>
+ <name>Ceph cluster in WARN state</name>
+ <url/>
+ <status>0</status>
+ <priority>4</priority>
+ <description>Issue a trigger if Ceph cluster is in WARN state for &gt;1h</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.num_osd_in.change()}&lt;0</expression>
+ <name>Number of IN OSDs decreased</name>
+ <url/>
+ <status>0</status>
+ <priority>2</priority>
+ <description>Amount of OSDs in IN state decreased</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.num_osd_up.change()}&lt;0</expression>
+ <name>Number of UP OSDs decreased</name>
+ <url/>
+ <status>0</status>
+ <priority>2</priority>
+ <description>Amount of OSDs in UP state decreased</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ </triggers>
+ <graphs>
+ <graph>
+ <name>Ceph bandwidth</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>1</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>0</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.rd_bytes</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.wr_bytes</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph free space</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>2</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_bytes</key>
+ </ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>00AA00</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_avail_bytes</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>DD0000</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_used_bytes</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph health</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>2.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>1</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>7</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.overall_status_int</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph I/O</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>1</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.rd_ops</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.wr_ops</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph OSD latency</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>0</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_apply_avg</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_commit_avg</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>2</sortorder>
+ <drawtype>0</drawtype>
+ <color>2774A4</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_apply_max</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>3</sortorder>
+ <drawtype>0</drawtype>
+ <color>A54F10</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_commit_max</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>4</sortorder>
+ <drawtype>0</drawtype>
+ <color>FC6EA3</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_apply_min</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>5</sortorder>
+ <drawtype>0</drawtype>
+ <color>6C59DC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_commit_min</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph OSD utilization</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>1</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>0000CC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_nearfull_ratio</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_full_ratio</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>2</sortorder>
+ <drawtype>0</drawtype>
+ <color>CC00CC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_backfillfull_ratio</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>3</sortorder>
+ <drawtype>0</drawtype>
+ <color>A54F10</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_max_fill</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>4</sortorder>
+ <drawtype>0</drawtype>
+ <color>FC6EA3</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_avg_fill</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>5</sortorder>
+ <drawtype>0</drawtype>
+ <color>6C59DC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_min_fill</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph storage overview</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>0.0000</yaxismax>
+ <show_work_period>0</show_work_period>
+ <show_triggers>0</show_triggers>
+ <type>2</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>0</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_used_bytes</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>00CC00</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_avail_bytes</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ </graphs>
+</zabbix_export>