summaryrefslogtreecommitdiffstats
path: root/src/pybind/mgr/zabbix
diff options
context:
space:
mode:
Diffstat (limited to 'src/pybind/mgr/zabbix')
-rw-r--r--src/pybind/mgr/zabbix/__init__.py1
-rw-r--r--src/pybind/mgr/zabbix/module.py476
-rw-r--r--src/pybind/mgr/zabbix/zabbix_template.xml3249
3 files changed, 3726 insertions, 0 deletions
diff --git a/src/pybind/mgr/zabbix/__init__.py b/src/pybind/mgr/zabbix/__init__.py
new file mode 100644
index 000000000..8f210ac92
--- /dev/null
+++ b/src/pybind/mgr/zabbix/__init__.py
@@ -0,0 +1 @@
+from .module import Module
diff --git a/src/pybind/mgr/zabbix/module.py b/src/pybind/mgr/zabbix/module.py
new file mode 100644
index 000000000..638b68856
--- /dev/null
+++ b/src/pybind/mgr/zabbix/module.py
@@ -0,0 +1,476 @@
+"""
+Zabbix module for ceph-mgr
+
+Collect statistics from Ceph cluster and every X seconds send data to a Zabbix
+server using the zabbix_sender executable.
+"""
+import logging
+import json
+import errno
+import re
+from subprocess import Popen, PIPE
+from threading import Event
+from mgr_module import CLIReadCommand, CLIWriteCommand, MgrModule, Option, OptionValue
+from typing import cast, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
+
+
def avg(data: Sequence[Union[int, float]]) -> float:
    """Return the arithmetic mean of *data*, or 0.0 for an empty sequence.

    Returning 0.0 instead of raising lets callers aggregate stats even when
    no samples were collected during an interval.
    """
    # Guard the empty case: sum()/0 would raise ZeroDivisionError. The
    # original returned int 0 here, contradicting the -> float annotation.
    if not data:
        return 0.0
    return sum(data) / float(len(data))
+
+
class ZabbixSender(object):
    """Thin wrapper around the zabbix_sender(8) executable.

    Items are piped to zabbix_sender on stdin, one per line, in its
    '<hostname> <key> <value>' format with every key prefixed 'ceph.'.
    """

    def __init__(self, sender: str, host: str, port: int, log: logging.Logger) -> None:
        self.sender = sender  # path to the zabbix_sender binary
        self.host = host      # Zabbix server address
        self.port = port      # Zabbix server trapper port
        self.log = log

    def send(self, hostname: str, data: Mapping[str, Union[int, float, str]]) -> None:
        """Send *data* as host *hostname*.

        No-op for empty *data*. Raises RuntimeError when zabbix_sender
        exits non-zero.
        """
        if len(data) == 0:
            return

        cmd = [self.sender, '-z', self.host, '-p', str(self.port), '-s',
               hostname, '-vv', '-i', '-']

        self.log.debug('Executing: %s', cmd)

        proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, encoding='utf-8')

        # Hand the whole payload to communicate() instead of writing to
        # proc.stdin piecemeal: communicate() feeds stdin while draining
        # stdout/stderr concurrently, avoiding a pipe-buffer deadlock when
        # the (verbose, -vv) child output fills the stdout pipe before we
        # finish writing.
        payload = ''.join('{0} ceph.{1} {2}\n'.format(hostname, key, value)
                          for key, value in data.items())
        stdout, stderr = proc.communicate(input=payload)
        if proc.returncode != 0:
            raise RuntimeError('%s exited non-zero: %s' % (self.sender,
                                                           stderr))

        self.log.debug('Zabbix Sender: %s', stdout.rstrip())
+
+
class Module(MgrModule):
    """ceph-mgr module that periodically ships cluster stats to Zabbix."""

    # Set True by serve() and False by shutdown(); the serve loop runs
    # while this is True.
    run = False
    # In-memory copy of the module options (name -> current value).
    config: Dict[str, OptionValue] = {}
    # Maps Ceph health status strings to the integer item sent to Zabbix.
    ceph_health_mapping = {'HEALTH_OK': 0, 'HEALTH_WARN': 1, 'HEALTH_ERR': 2}
    # Parsed zabbix_host option: one {'zabbix_host': ..., 'zabbix_port': ...}
    # dict per configured server (populated by _parse_zabbix_hosts()).
    _zabbix_hosts: List[Dict[str, Union[str, int]]] = list()

    @property
    def config_keys(self) -> Dict[str, OptionValue]:
        """Return {option name: default value} for all MODULE_OPTIONS."""
        return dict((o['name'], o.get('default', None))
                    for o in self.MODULE_OPTIONS)

    # Options exposed via `ceph config set mgr mgr/zabbix/<name> <value>`
    # and the `zabbix config-set` command.
    MODULE_OPTIONS = [
        Option(
            name='zabbix_sender',
            default='/usr/bin/zabbix_sender'),
        Option(
            name='zabbix_host',
            type='str',
            default=None),
        Option(
            name='zabbix_port',
            type='int',
            default=10051),
        Option(
            name='identifier',
            default=""),
        Option(
            name='interval',
            type='secs',
            default=60),
        Option(
            name='discovery_interval',
            type='uint',
            default=100)
    ]
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super(Module, self).__init__(*args, **kwargs)
+ self.event = Event()
+
+ def init_module_config(self) -> None:
+ self.fsid = self.get('mon_map')['fsid']
+ self.log.debug('Found Ceph fsid %s', self.fsid)
+
+ for key, default in self.config_keys.items():
+ self.set_config_option(key, self.get_module_option(key, default))
+
+ if self.config['zabbix_host']:
+ self._parse_zabbix_hosts()
+
+ def set_config_option(self, option: str, value: OptionValue) -> bool:
+ if option not in self.config_keys.keys():
+ raise RuntimeError('{0} is a unknown configuration '
+ 'option'.format(option))
+
+ if option in ['zabbix_port', 'interval', 'discovery_interval']:
+ try:
+ int_value = int(value) # type: ignore
+ except (ValueError, TypeError):
+ raise RuntimeError('invalid {0} configured. Please specify '
+ 'a valid integer'.format(option))
+
+ if option == 'interval' and int_value < 10:
+ raise RuntimeError('interval should be set to at least 10 seconds')
+
+ if option == 'discovery_interval' and int_value < 10:
+ raise RuntimeError(
+ "discovery_interval should not be more frequent "
+ "than once in 10 regular data collection"
+ )
+
+ self.log.debug('Setting in-memory config option %s to: %s', option,
+ value)
+ self.config[option] = value
+ return True
+
+ def _parse_zabbix_hosts(self) -> None:
+ self._zabbix_hosts = list()
+ servers = cast(str, self.config['zabbix_host']).split(",")
+ for server in servers:
+ uri = re.match("(?:(?:\[?)([a-z0-9-\.]+|[a-f0-9:\.]+)(?:\]?))(?:((?::))([0-9]{1,5}))?$", server)
+ if uri:
+ zabbix_host, sep, opt_zabbix_port = uri.groups()
+ if sep == ':':
+ zabbix_port = int(opt_zabbix_port)
+ else:
+ zabbix_port = cast(int, self.config['zabbix_port'])
+ self._zabbix_hosts.append({'zabbix_host': zabbix_host, 'zabbix_port': zabbix_port})
+ else:
+ self.log.error('Zabbix host "%s" is not valid', server)
+
+ self.log.error('Parsed Zabbix hosts: %s', self._zabbix_hosts)
+
+ def get_pg_stats(self) -> Dict[str, int]:
+ stats = dict()
+
+ pg_states = ['active', 'peering', 'clean', 'scrubbing', 'undersized',
+ 'backfilling', 'recovering', 'degraded', 'inconsistent',
+ 'remapped', 'backfill_toofull', 'backfill_wait',
+ 'recovery_wait']
+
+ for state in pg_states:
+ stats['num_pg_{0}'.format(state)] = 0
+
+ pg_status = self.get('pg_status')
+
+ stats['num_pg'] = pg_status['num_pgs']
+
+ for state in pg_status['pgs_by_state']:
+ states = state['state_name'].split('+')
+ for s in pg_states:
+ key = 'num_pg_{0}'.format(s)
+ if s in states:
+ stats[key] += state['count']
+
+ return stats
+
    def get_data(self) -> Dict[str, Union[int, float]]:
        """Collect cluster, pool, OSD and PG statistics for one interval.

        Keys are either plain item names (e.g. 'num_mon') or Zabbix
        low-level-discovery style keys (e.g. '[poolname,rd_bytes]',
        '[osd.0,up]') matching the bundled template.
        """
        data = dict()

        health = json.loads(self.get('health')['json'])
        # 'status' is luminous+, 'overall_status' is legacy mode.
        data['overall_status'] = health.get('status',
                                            health.get('overall_status'))
        # Numeric form of the health status; see ceph_health_mapping.
        data['overall_status_int'] = \
            self.ceph_health_mapping.get(data['overall_status'])

        mon_status = json.loads(self.get('mon_status')['json'])
        data['num_mon'] = len(mon_status['monmap']['mons'])

        df = self.get('df')
        data['num_pools'] = len(df['pools'])
        data['total_used_bytes'] = df['stats']['total_used_bytes']
        data['total_bytes'] = df['stats']['total_bytes']
        data['total_avail_bytes'] = df['stats']['total_avail_bytes']

        # Cluster-wide I/O totals, accumulated over all pools below.
        wr_ops = 0
        rd_ops = 0
        wr_bytes = 0
        rd_bytes = 0

        for pool in df['pools']:
            wr_ops += pool['stats']['wr']
            rd_ops += pool['stats']['rd']
            wr_bytes += pool['stats']['wr_bytes']
            rd_bytes += pool['stats']['rd_bytes']
            # Per-pool items, keyed for the pool low-level-discovery rule.
            data['[{0},rd_bytes]'.format(pool['name'])] = pool['stats']['rd_bytes']
            data['[{0},wr_bytes]'.format(pool['name'])] = pool['stats']['wr_bytes']
            data['[{0},rd_ops]'.format(pool['name'])] = pool['stats']['rd']
            data['[{0},wr_ops]'.format(pool['name'])] = pool['stats']['wr']
            data['[{0},bytes_used]'.format(pool['name'])] = pool['stats']['bytes_used']
            data['[{0},stored_raw]'.format(pool['name'])] = pool['stats']['stored_raw']
            # percent_used comes back as a 0..1 fraction; Zabbix wants 0..100.
            data['[{0},percent_used]'.format(pool['name'])] = pool['stats']['percent_used'] * 100

        data['wr_ops'] = wr_ops
        data['rd_ops'] = rd_ops
        data['wr_bytes'] = wr_bytes
        data['rd_bytes'] = rd_bytes

        osd_map = self.get('osd_map')
        data['num_osd'] = len(osd_map['osds'])
        data['osd_nearfull_ratio'] = osd_map['nearfull_ratio']
        data['osd_full_ratio'] = osd_map['full_ratio']
        data['osd_backfillfull_ratio'] = osd_map['backfillfull_ratio']

        data['num_pg_temp'] = len(osd_map['pg_temp'])

        # Per-OSD up/in flags plus cluster-wide tallies.
        num_up = 0
        num_in = 0
        for osd in osd_map['osds']:
            data['[osd.{0},up]'.format(int(osd['osd']))] = osd['up']
            if osd['up'] == 1:
                num_up += 1

            data['[osd.{0},in]'.format(int(osd['osd']))] = osd['in']
            if osd['in'] == 1:
                num_in += 1

        data['num_osd_up'] = num_up
        data['num_osd_in'] = num_in

        # Accumulators for min/max/avg across all OSDs.
        osd_fill = list()
        osd_pgs = list()
        osd_apply_latency_ns = list()
        osd_commit_latency_ns = list()

        osd_stats = self.get('osd_stats')
        for osd in osd_stats['osd_stats']:
            try:
                # Fill percentage; an OSD reporting kb == 0 (e.g. not yet
                # booted) is skipped entirely for this iteration.
                osd_fill.append((float(osd['kb_used']) / float(osd['kb'])) * 100)
                data['[osd.{0},osd_fill]'.format(osd['osd'])] = (
                    float(osd['kb_used']) / float(osd['kb'])) * 100
            except ZeroDivisionError:
                continue
            osd_pgs.append(osd['num_pgs'])
            osd_apply_latency_ns.append(osd['perf_stat']['apply_latency_ns'])
            osd_commit_latency_ns.append(osd['perf_stat']['commit_latency_ns'])
            data['[osd.{0},num_pgs]'.format(osd['osd'])] = osd['num_pgs']
            data[
                '[osd.{0},osd_latency_apply]'.format(osd['osd'])
            ] = osd['perf_stat']['apply_latency_ns'] / 1000000.0  # ns -> ms
            data[
                '[osd.{0},osd_latency_commit]'.format(osd['osd'])
            ] = osd['perf_stat']['commit_latency_ns'] / 1000000.0  # ns -> ms

        # max()/min() raise ValueError on empty sequences (no usable OSDs);
        # in that case these summary items are simply omitted.
        try:
            data['osd_max_fill'] = max(osd_fill)
            data['osd_min_fill'] = min(osd_fill)
            data['osd_avg_fill'] = avg(osd_fill)
            data['osd_max_pgs'] = max(osd_pgs)
            data['osd_min_pgs'] = min(osd_pgs)
            data['osd_avg_pgs'] = avg(osd_pgs)
        except ValueError:
            pass

        try:
            data['osd_latency_apply_max'] = max(osd_apply_latency_ns) / 1000000.0   # ns -> ms
            data['osd_latency_apply_min'] = min(osd_apply_latency_ns) / 1000000.0   # ns -> ms
            data['osd_latency_apply_avg'] = avg(osd_apply_latency_ns) / 1000000.0   # ns -> ms

            data['osd_latency_commit_max'] = max(osd_commit_latency_ns) / 1000000.0  # ns -> ms
            data['osd_latency_commit_min'] = min(osd_commit_latency_ns) / 1000000.0  # ns -> ms
            data['osd_latency_commit_avg'] = avg(osd_commit_latency_ns) / 1000000.0  # ns -> ms
        except ValueError:
            pass

        data.update(self.get_pg_stats())

        return data
+
+ def send(self, data: Mapping[str, Union[int, float, str]]) -> bool:
+ identifier = cast(Optional[str], self.config['identifier'])
+ if identifier is None or len(identifier) == 0:
+ identifier = 'ceph-{0}'.format(self.fsid)
+
+ if not self.config['zabbix_host'] or not self._zabbix_hosts:
+ self.log.error('Zabbix server not set, please configure using: '
+ 'ceph zabbix config-set zabbix_host <zabbix_host>')
+ self.set_health_checks({
+ 'MGR_ZABBIX_NO_SERVER': {
+ 'severity': 'warning',
+ 'summary': 'No Zabbix server configured',
+ 'detail': ['Configuration value zabbix_host not configured']
+ }
+ })
+ return False
+
+ result = True
+
+ for server in self._zabbix_hosts:
+ self.log.info(
+ 'Sending data to Zabbix server %s, port %s as host/identifier %s',
+ server['zabbix_host'], server['zabbix_port'], identifier)
+ self.log.debug(data)
+
+ try:
+ zabbix = ZabbixSender(cast(str, self.config['zabbix_sender']),
+ cast(str, server['zabbix_host']),
+ cast(int, server['zabbix_port']), self.log)
+ zabbix.send(identifier, data)
+ except Exception as exc:
+ self.log.exception('Failed to send.')
+ self.set_health_checks({
+ 'MGR_ZABBIX_SEND_FAILED': {
+ 'severity': 'warning',
+ 'summary': 'Failed to send data to Zabbix',
+ 'detail': [str(exc)]
+ }
+ })
+ result = False
+
+ self.set_health_checks(dict())
+ return result
+
    def discovery(self) -> bool:
        """Send Zabbix low-level-discovery payloads for pools and OSDs.

        Builds the {#POOL}/{#CRUSH_RULE} and {#OSD}/{#CRUSH_RULE} JSON
        payloads from the OSD and CRUSH maps and ships them via send().
        Returns True when the send succeeded.
        """
        osd_map = self.get('osd_map')
        osd_map_crush = self.get('osd_map_crush')

        # Discovering ceph pools: map each pool name to the root bucket
        # name taken by the 'take' step of its CRUSH rule.
        pool_discovery = {
            pool['pool_name']: step['item_name']
            for pool in osd_map['pools']
            for rule in osd_map_crush['rules'] if rule['rule_id'] == pool['crush_rule']
            for step in rule['steps'] if step['op'] == "take"
        }
        pools_discovery_data = {"data": [
            {
                "{#POOL}": pool,
                "{#CRUSH_RULE}": rule
            }
            for pool, rule in pool_discovery.items()
        ]}

        # Discovering OSDs
        # Getting hosts for found crush rules:
        # root bucket name -> ids of the buckets directly under it.
        osd_roots = {
            step['item_name']: [
                item['id']
                for item in root_bucket['items']
            ]
            for rule in osd_map_crush['rules']
            for step in rule['steps'] if step['op'] == "take"
            for root_bucket in osd_map_crush['buckets']
            if root_bucket['id'] == step['item']
        }
        # Getting osds for hosts with map to crush_rule:
        # OSD id -> name of the crush root it lives under.
        osd_discovery = {
            item['id']: crush_rule
            for crush_rule, roots in osd_roots.items()
            for root in roots
            for bucket in osd_map_crush['buckets']
            if bucket['id'] == root
            for item in bucket['items']
        }
        osd_discovery_data = {"data": [
            {
                "{#OSD}": osd,
                "{#CRUSH_RULE}": rule
            }
            for osd, rule in osd_discovery.items()
        ]}
        # Preparing received data for sending
        data = {
            "zabbix.pool.discovery": json.dumps(pools_discovery_data),
            "zabbix.osd.discovery": json.dumps(osd_discovery_data)
        }
        return bool(self.send(data))
+
+ @CLIReadCommand('zabbix config-show')
+ def config_show(self) -> Tuple[int, str, str]:
+ """
+ Show current configuration
+ """
+ return 0, json.dumps(self.config, indent=4, sort_keys=True), ''
+
+ @CLIWriteCommand('zabbix config-set')
+ def config_set(self, key: str, value: str) -> Tuple[int, str, str]:
+ """
+ Set a configuration value
+ """
+ if not value:
+ return -errno.EINVAL, '', 'Value should not be empty or None'
+
+ self.log.debug('Setting configuration option %s to %s', key, value)
+ if self.set_config_option(key, value):
+ self.set_module_option(key, value)
+ if key == 'zabbix_host' or key == 'zabbix_port':
+ self._parse_zabbix_hosts()
+ return 0, 'Configuration option {0} updated'.format(key), ''
+ return 1,\
+ 'Failed to update configuration option {0}'.format(key), ''
+
+ @CLIReadCommand('zabbix send')
+ def do_send(self) -> Tuple[int, str, str]:
+ """
+ Force sending data to Zabbix
+ """
+ data = self.get_data()
+ if self.send(data):
+ return 0, 'Sending data to Zabbix', ''
+
+ return 1, 'Failed to send data to Zabbix', ''
+
+ @CLIReadCommand('zabbix discovery')
+ def do_discovery(self) -> Tuple[int, str, str]:
+ """
+ Discovering Zabbix data
+ """
+ if self.discovery():
+ return 0, 'Sending discovery data to Zabbix', ''
+
+ return 1, 'Failed to send discovery data to Zabbix', ''
+
+ def shutdown(self) -> None:
+ self.log.info('Stopping zabbix')
+ self.run = False
+ self.event.set()
+
    def serve(self) -> None:
        """Main loop: collect and send stats every `interval` seconds.

        Discovery data is sent immediately at startup and then once every
        `discovery_interval` regular collection cycles. The loop exits
        when shutdown() clears self.run and sets self.event.
        """
        self.log.info('Zabbix module starting up')
        self.run = True

        self.init_module_config()

        discovery_interval = self.config['discovery_interval']
        # We are sending discovery once plugin is loaded
        discovery_counter = cast(int, discovery_interval)
        while self.run:
            self.log.debug('Waking up for new iteration')

            if discovery_counter == discovery_interval:
                try:
                    self.discovery()
                except Exception:
                    # Shouldn't happen, but let's log it and retry next interval,
                    # rather than dying completely.
                    self.log.exception("Unexpected error during discovery():")
                finally:
                    # Reset even on failure so discovery is retried on its
                    # normal schedule, not on every iteration.
                    discovery_counter = 0

            try:
                data = self.get_data()
                self.send(data)
            except Exception:
                # Shouldn't happen, but let's log it and retry next interval,
                # rather than dying completely.
                self.log.exception("Unexpected error during send():")

            interval = cast(float, self.config['interval'])
            self.log.debug('Sleeping for %d seconds', interval)
            discovery_counter += 1
            # event.wait() returns early when shutdown() sets the event.
            self.event.wait(interval)
+
+ def self_test(self) -> None:
+ data = self.get_data()
+
+ if data['overall_status'] not in self.ceph_health_mapping:
+ raise RuntimeError('No valid overall_status found in data')
+
+ int(data['overall_status_int'])
+
+ if data['num_mon'] < 1:
+ raise RuntimeError('num_mon is smaller than 1')
diff --git a/src/pybind/mgr/zabbix/zabbix_template.xml b/src/pybind/mgr/zabbix/zabbix_template.xml
new file mode 100644
index 000000000..3b933bcf3
--- /dev/null
+++ b/src/pybind/mgr/zabbix/zabbix_template.xml
@@ -0,0 +1,3249 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<zabbix_export>
+ <version>3.0</version>
+ <date>2019-01-25T10:12:41Z</date>
+ <groups>
+ <group>
+ <name>Templates</name>
+ </group>
+ </groups>
+ <templates>
+ <template>
+ <template>ceph-mgr Zabbix module</template>
+ <name>ceph-mgr Zabbix module</name>
+ <description/>
+ <groups>
+ <group>
+ <name>Templates</name>
+ </group>
+ </groups>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <items>
+ <item>
+ <name>Number of Monitors</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_mon</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Number of Monitors configured in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of OSDs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_osd</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Number of OSDs in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of OSDs in state: IN</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_osd_in</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of IN OSDs in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of OSDs in state: UP</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_osd_up</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of UP OSDs in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Temporary state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_temp</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in pg_temp state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Active state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_active</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in active state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Clean state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_clean</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in clean state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Peering state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_peering</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in peering state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Scrubbing state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_scrubbing</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in scrubbing state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Undersized state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_undersized</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in undersized state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in Backfilling state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_backfilling</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in backfilling state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in degraded state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_degraded</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in degraded state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in inconsistent state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_inconsistent</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in inconsistent state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in remapped state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_remapped</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in remapped state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in recovering state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_recovering</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in recovering state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in backfill_toofull state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_backfill_toofull</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in backfill_toofull state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in backfill_wait state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_backfill_wait</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in backfill_wait state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Placement Groups in recovery_wait state</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pg_recovery_wait</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of Placement Groups in recovery_wait state</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Number of Pools</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.num_pools</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of pools in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD avg fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_avg_fill</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average fill of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD max PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_max_pgs</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Maximum amount of PGs on OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD min PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_min_pgs</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Minimum amount of PGs on OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD avg PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_avg_pgs</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average amount of PGs on OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph backfill full ratio</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>1</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_backfillfull_ratio</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>100</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Backfill full ratio setting of Ceph cluster as configured on OSDMap</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph full ratio</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>1</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_full_ratio</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>100</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Full ratio setting of Ceph cluster as configured on OSDMap</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Apply latency Avg</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_apply_avg</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average apply latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Apply latency Max</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_apply_max</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Maximum apply latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Apply latency Min</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_apply_min</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Minimum apply latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Commit latency Avg</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_commit_avg</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Average commit latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Commit latency Max</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_commit_max</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Maximum commit latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD Commit latency Min</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_latency_commit_min</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Minimum commit latency of OSDs</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD max fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_max_fill</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Percentage fill of maximum filled OSD</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph OSD min fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_min_fill</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Percentage fill of minimum filled OSD</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph nearfull ratio</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>1</multiplier>
+ <snmp_oid/>
+ <key>ceph.osd_nearfull_ratio</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>100</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Near full ratio setting of Ceph cluster as configured on OSDMap</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Overall Ceph status</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.overall_status</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>0</trends>
+ <status>0</status>
+ <value_type>4</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Overall Ceph cluster status, e.g. HEALTH_OK, HEALTH_WARN or HEALTH_ERR</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Overall Ceph status (numeric)</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.overall_status_int</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Overall Ceph status in numeric value. OK: 0, WARN: 1, ERR: 2</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Read bandwidth</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.rd_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>b</units>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global read bandwidth</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Read operations</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.rd_ops</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global read operations per second</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total bytes available</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_avail_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>B</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total bytes available in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total bytes</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>B</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total (RAW) capacity of Ceph cluster in bytes</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total number of objects</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_objects</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total number of objects in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Total bytes used</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.total_used_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>B</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Total bytes used in Ceph cluster</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Write bandwidth</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.wr_bytes</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>b</units>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global write bandwidth</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ <item>
+ <name>Ceph Write operations</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.wr_ops</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>1</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description>Global write operations per second</description>
+ <inventory_link>0</inventory_link>
+ <applications>
+ <application>
+ <name>Ceph</name>
+ </application>
+ </applications>
+ <valuemap/>
+ <logtimefmt/>
+ </item>
+ </items>
+ <discovery_rules>
+ <discovery_rule>
+ <name>Ceph OSD discovery</name>
+ <type>2</type>
+ <snmp_community/>
+ <snmp_oid/>
+ <key>ceph.zabbix.osd.discovery</key>
+ <delay>0</delay>
+ <status>0</status>
+ <allowed_hosts/>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <filter>
+ <evaltype>0</evaltype>
+ <formula/>
+ <conditions/>
+ </filter>
+ <lifetime>90</lifetime>
+ <description/>
+ <item_prototypes>
+ <item_prototype>
+ <name>[osd.{#OSD}] OSD in</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[osd.{#OSD},in]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[osd.{#OSD}] OSD PGs</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[osd.{#OSD},num_pgs]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[osd.{#OSD}] OSD fill</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[osd.{#OSD},osd_fill]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units>%</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[osd.{#OSD}] OSD latency apply</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[osd.{#OSD},osd_latency_apply]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units>ms</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[osd.{#OSD}] OSD latency commit</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[osd.{#OSD},osd_latency_commit]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units>ms</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[osd.{#OSD}] OSD up</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[osd.{#OSD},up]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units/>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ </item_prototypes>
+ <trigger_prototypes>
+ <trigger_prototype>
+ <expression>{ceph-mgr Zabbix module:ceph.[osd.{#OSD},up].last()}=0</expression>
+ <name>Ceph OSD osd.{#OSD} is DOWN</name>
+ <url/>
+ <status>0</status>
+ <priority>2</priority>
+ <description/>
+ <type>0</type>
+ <dependencies/>
+ </trigger_prototype>
+ <trigger_prototype>
+ <expression>{ceph-mgr Zabbix module:ceph.[osd.{#OSD},osd_fill].last()}&gt;={ceph-mgr Zabbix module:ceph.osd_full_ratio.last()}</expression>
+ <name>Ceph OSD osd.{#OSD} is full: {ITEM.VALUE}%</name>
+ <url/>
+ <status>0</status>
+ <priority>4</priority>
+ <description/>
+ <type>0</type>
+ <dependencies/>
+ </trigger_prototype>
+ <trigger_prototype>
+ <expression>{ceph-mgr Zabbix module:ceph.[osd.{#OSD},osd_fill].last()}&gt;={ceph-mgr Zabbix module:ceph.osd_nearfull_ratio.last()}</expression>
+ <name>Ceph OSD osd.{#OSD} is near full: {ITEM.VALUE}%</name>
+ <url/>
+ <status>0</status>
+ <priority>2</priority>
+ <description/>
+ <type>0</type>
+ <dependencies/>
+ </trigger_prototype>
+ </trigger_prototypes>
+ <graph_prototypes/>
+ <host_prototypes/>
+ </discovery_rule>
+ <discovery_rule>
+ <name>Ceph pool discovery</name>
+ <type>2</type>
+ <snmp_community/>
+ <snmp_oid/>
+ <key>ceph.zabbix.pool.discovery</key>
+ <delay>0</delay>
+ <status>0</status>
+ <allowed_hosts/>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <filter>
+ <evaltype>0</evaltype>
+ <formula/>
+ <conditions/>
+ </filter>
+ <lifetime>90</lifetime>
+ <description/>
+ <item_prototypes>
+ <item_prototype>
+ <name>[{#POOL}] Pool Used</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},bytes_used]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>b</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[{#POOL}] Pool RAW Used</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},stored_raw]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>b</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[{#POOL}] Pool Percent Used</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},percent_used]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>0</value_type>
+ <allowed_hosts/>
+ <units>%</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[{#POOL}] Pool Read bandwidth</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},rd_bytes]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>bytes</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[{#POOL}] Pool Read operations</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},rd_ops]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>ops</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[{#POOL}] Pool Write bandwidth</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},wr_bytes]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>bytes</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ <item_prototype>
+ <name>[{#POOL}] Pool Write operations</name>
+ <type>2</type>
+ <snmp_community/>
+ <multiplier>0</multiplier>
+ <snmp_oid/>
+ <key>ceph.[{#POOL},wr_ops]</key>
+ <delay>0</delay>
+ <history>90</history>
+ <trends>365</trends>
+ <status>0</status>
+ <value_type>3</value_type>
+ <allowed_hosts/>
+ <units>ops</units>
+ <delta>0</delta>
+ <snmpv3_contextname/>
+ <snmpv3_securityname/>
+ <snmpv3_securitylevel>0</snmpv3_securitylevel>
+ <snmpv3_authprotocol>0</snmpv3_authprotocol>
+ <snmpv3_authpassphrase/>
+ <snmpv3_privprotocol>0</snmpv3_privprotocol>
+ <snmpv3_privpassphrase/>
+ <formula>1</formula>
+ <delay_flex/>
+ <params/>
+ <ipmi_sensor/>
+ <data_type>0</data_type>
+ <authtype>0</authtype>
+ <username/>
+ <password/>
+ <publickey/>
+ <privatekey/>
+ <port/>
+ <description/>
+ <inventory_link>0</inventory_link>
+ <applications/>
+ <valuemap/>
+ <logtimefmt/>
+ <application_prototypes>
+ <application_prototype>
+ <name>Ceph CRUSH [{#CRUSH_RULE}]</name>
+ </application_prototype>
+ </application_prototypes>
+ </item_prototype>
+ </item_prototypes>
+ <trigger_prototypes/>
+ <graph_prototypes/>
+ <host_prototypes/>
+ </discovery_rule>
+ </discovery_rules>
+ <macros/>
+ <templates/>
+ <screens>
+ <screen>
+ <name>Ceph</name>
+ <hsize>1</hsize>
+ <vsize>7</vsize>
+ <screen_items>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>500</width>
+ <height>100</height>
+ <x>0</x>
+ <y>0</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph storage overview</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>1</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph free space</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>2</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph health</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>3</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph bandwidth</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>4</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph I/O</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>5</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph OSD utilization</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ <screen_item>
+ <resourcetype>0</resourcetype>
+ <width>900</width>
+ <height>200</height>
+ <x>0</x>
+ <y>6</y>
+ <colspan>1</colspan>
+ <rowspan>1</rowspan>
+ <elements>0</elements>
+ <valign>0</valign>
+ <halign>0</halign>
+ <style>0</style>
+ <url/>
+ <dynamic>0</dynamic>
+ <sort_triggers>0</sort_triggers>
+ <resource>
+ <name>Ceph OSD latency</name>
+ <host>ceph-mgr Zabbix module</host>
+ </resource>
+ <max_columns>3</max_columns>
+ <application/>
+ </screen_item>
+ </screen_items>
+ </screen>
+ </screens>
+ </template>
+ </templates>
+ <triggers>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.overall_status_int.last()}=2</expression>
+ <name>Ceph cluster in ERR state</name>
+ <url/>
+ <status>0</status>
+ <priority>5</priority>
+ <description>Ceph cluster is in ERR state</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.overall_status_int.avg(1h)}=1</expression>
+ <name>Ceph cluster in WARN state</name>
+ <url/>
+ <status>0</status>
+ <priority>4</priority>
+ <description>Issue a trigger if Ceph cluster is in WARN state for &gt;1h</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.num_osd_in.abschange()}&gt;0</expression>
+ <name>Number of IN OSDs changed</name>
+ <url/>
+ <status>0</status>
+ <priority>2</priority>
+ <description>Amount of OSDs in IN state changed</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ <trigger>
+ <expression>{ceph-mgr Zabbix module:ceph.num_osd_up.abschange()}&gt;0</expression>
+ <name>Number of UP OSDs changed</name>
+ <url/>
+ <status>0</status>
+ <priority>2</priority>
+ <description>Amount of OSDs in UP state changed</description>
+ <type>0</type>
+ <dependencies/>
+ </trigger>
+ </triggers>
+ <graphs>
+ <graph>
+ <name>Ceph bandwidth</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>1</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>0</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.rd_bytes</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.wr_bytes</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph free space</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>2</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_bytes</key>
+ </ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>00AA00</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_avail_bytes</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>DD0000</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_used_bytes</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph health</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>2.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>1</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>7</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.overall_status_int</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph I/O</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>1</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.rd_ops</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.wr_ops</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph OSD latency</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>0</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>1A7C11</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_apply_avg</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_commit_avg</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>2</sortorder>
+ <drawtype>0</drawtype>
+ <color>2774A4</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_apply_max</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>3</sortorder>
+ <drawtype>0</drawtype>
+ <color>A54F10</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_commit_max</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>4</sortorder>
+ <drawtype>0</drawtype>
+ <color>FC6EA3</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_apply_min</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>5</sortorder>
+ <drawtype>0</drawtype>
+ <color>6C59DC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>4</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_latency_commit_min</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph OSD utilization</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>100.0000</yaxismax>
+ <show_work_period>1</show_work_period>
+ <show_triggers>1</show_triggers>
+ <type>0</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>1</ymin_type_1>
+ <ymax_type_1>1</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>0000CC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_nearfull_ratio</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_full_ratio</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>2</sortorder>
+ <drawtype>0</drawtype>
+ <color>CC00CC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_backfillfull_ratio</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>3</sortorder>
+ <drawtype>0</drawtype>
+ <color>A54F10</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_max_fill</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>4</sortorder>
+ <drawtype>0</drawtype>
+ <color>FC6EA3</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_avg_fill</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>5</sortorder>
+ <drawtype>0</drawtype>
+ <color>6C59DC</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.osd_min_fill</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ <graph>
+ <name>Ceph storage overview</name>
+ <width>900</width>
+ <height>200</height>
+ <yaxismin>0.0000</yaxismin>
+ <yaxismax>0.0000</yaxismax>
+ <show_work_period>0</show_work_period>
+ <show_triggers>0</show_triggers>
+ <type>2</type>
+ <show_legend>1</show_legend>
+ <show_3d>0</show_3d>
+ <percent_left>0.0000</percent_left>
+ <percent_right>0.0000</percent_right>
+ <ymin_type_1>0</ymin_type_1>
+ <ymax_type_1>0</ymax_type_1>
+ <ymin_item_1>0</ymin_item_1>
+ <ymax_item_1>0</ymax_item_1>
+ <graph_items>
+ <graph_item>
+ <sortorder>0</sortorder>
+ <drawtype>0</drawtype>
+ <color>F63100</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_used_bytes</key>
+ </item>
+ </graph_item>
+ <graph_item>
+ <sortorder>1</sortorder>
+ <drawtype>0</drawtype>
+ <color>00CC00</color>
+ <yaxisside>0</yaxisside>
+ <calc_fnc>2</calc_fnc>
+ <type>0</type>
+ <item>
+ <host>ceph-mgr Zabbix module</host>
+ <key>ceph.total_avail_bytes</key>
+ </item>
+ </graph_item>
+ </graph_items>
+ </graph>
+ </graphs>
+</zabbix_export>