From 5da14042f70711ea5cf66e034699730335462f66 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 5 May 2024 14:08:03 +0200
Subject: Merging upstream version 1.45.3+dfsg.

Signed-off-by: Daniel Baumann
---
 src/collectors/python.d.plugin/ceph/metadata.yaml | 223 ++++++++++++++++++++++
 1 file changed, 223 insertions(+)
 create mode 100644 src/collectors/python.d.plugin/ceph/metadata.yaml

diff --git a/src/collectors/python.d.plugin/ceph/metadata.yaml b/src/collectors/python.d.plugin/ceph/metadata.yaml
new file mode 100644
index 000000000..642941137
--- /dev/null
+++ b/src/collectors/python.d.plugin/ceph/metadata.yaml
@@ -0,0 +1,223 @@
+plugin_name: python.d.plugin
+modules:
+  - meta:
+      plugin_name: python.d.plugin
+      module_name: ceph
+      monitored_instance:
+        name: Ceph
+        link: 'https://ceph.io/'
+        categories:
+          - data-collection.storage-mount-points-and-filesystems
+        icon_filename: 'ceph.svg'
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ''
+      keywords:
+        - ceph
+        - storage
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: 'This collector monitors Ceph cluster statistics, OSD usage, latency and pool statistics.'
+        method_description: 'Uses the `rados` python module to connect to a Ceph cluster.'
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ''
+      default_behavior:
+        auto_detection:
+          description: ''
+        limits:
+          description: ''
+        performance_impact:
+          description: ''
+    setup:
+      prerequisites:
+        list:
+          - title: '`rados` python module'
+            description: 'Make sure the `rados` python module is installed'
+          - title: 'Granting read permissions on the keyring file to the ceph group'
+            description: 'Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`'
+          - title: 'Create a specific rados_id'
+            description: 'You can optionally create a rados_id to use instead of admin'
+      configuration:
+        file:
+          name: python.d/ceph.conf
+        options:
+          description: |
+            There are 2 sections:
+
+            * Global variables
+            * One or more JOBS that can define multiple different instances to monitor.
+
+            The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+            Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+            Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+          folding:
+            title: "Config options"
+            enabled: true
+          list:
+            - name: update_every
+              description: Sets the default data collection frequency.
+              default_value: 5
+              required: false
+            - name: priority
+              description: Controls the order of charts on the netdata dashboard.
+              default_value: 60000
+              required: false
+            - name: autodetection_retry
+              description: Sets the job re-check interval in seconds.
+              default_value: 0
+              required: false
+            - name: penalty
+              description: Indicates whether to apply a penalty to update_every in case of failures.
+              default_value: yes
+              required: false
+            - name: name
+              description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+              default_value: ''
+              required: false
+            - name: config_file
+              description: Ceph config file
+              default_value: ''
+              required: true
+            - name: keyring_file
+              description: Ceph keyring file. The netdata user must be added to the ceph group and the keyring file must have group read permission.
+              default_value: ''
+              required: true
+            - name: rados_id
+              description: A rados user id to use for connecting to the Ceph cluster.
+              default_value: 'admin'
+              required: false
+        examples:
+          folding:
+            enabled: true
+            title: "Config"
+          list:
+            - name: Basic local Ceph cluster
+              description: A basic configuration to connect to a local Ceph cluster.
+              folding:
+                enabled: false
+              config: |
+                local:
+                  config_file: '/etc/ceph/ceph.conf'
+                  keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: ceph_cluster_space_usage
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf
+        metric: ceph.general_usage
+        info: cluster disk space utilization
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: "These metrics refer to the entire monitored application."
+          labels: []
+          metrics:
+            - name: ceph.general_usage
+              description: Ceph General Space
+              unit: "KiB"
+              chart_type: stacked
+              dimensions:
+                - name: avail
+                - name: used
+            - name: ceph.general_objects
+              description: Ceph General Objects
+              unit: "objects"
+              chart_type: area
+              dimensions:
+                - name: cluster
+            - name: ceph.general_bytes
+              description: Ceph General Read/Write Data/s
+              unit: "KiB/s"
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: write
+            - name: ceph.general_operations
+              description: Ceph General Read/Write Operations/s
+              unit: "operations"
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: write
+            - name: ceph.general_latency
+              description: Ceph General Apply/Commit latency
+              unit: "milliseconds"
+              chart_type: area
+              dimensions:
+                - name: apply
+                - name: commit
+            - name: ceph.pool_usage
+              description: Ceph Pools
+              unit: "KiB"
+              chart_type: line
+              dimensions:
+                - name: a dimension per Ceph Pool
+            - name: ceph.pool_objects
+              description: Ceph Pools
+              unit: "objects"
+              chart_type: line
+              dimensions:
+                - name: a dimension per Ceph Pool
+            - name: ceph.pool_read_bytes
+              description: Ceph Read Pool Data/s
+              unit: "KiB/s"
+              chart_type: area
+              dimensions:
+                - name: a dimension per Ceph Pool
+            - name: ceph.pool_write_bytes
+              description: Ceph Write Pool Data/s
+              unit: "KiB/s"
+              chart_type: area
+              dimensions:
+                - name: a dimension per Ceph Pool
+            - name: ceph.pool_read_operations
+              description: Ceph Read Pool Operations/s
+              unit: "operations"
+              chart_type: area
+              dimensions:
+                - name: a dimension per Ceph Pool
+            - name: ceph.pool_write_operations
+              description: Ceph Write Pool Operations/s
+              unit: "operations"
+              chart_type: area
+              dimensions:
+                - name: a dimension per Ceph Pool
+            - name: ceph.osd_usage
+              description: Ceph OSDs
+              unit: "KiB"
+              chart_type: line
+              dimensions:
+                - name: a dimension per Ceph OSD
+            - name: ceph.osd_size
+              description: Ceph OSDs size
+              unit: "KiB"
+              chart_type: line
+              dimensions:
+                - name: a dimension per Ceph OSD
+            - name: ceph.apply_latency
+              description: Ceph OSDs apply latency
+              unit: "milliseconds"
+              chart_type: line
+              dimensions:
+                - name: a dimension per Ceph OSD
+            - name: ceph.commit_latency
+              description: Ceph OSDs commit latency
+              unit: "milliseconds"
+              chart_type: line
+              dimensions:
+                - name: a dimension per Ceph OSD
-- 
cgit v1.2.3
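The metadata above only states that the collector connects through the `rados` python module, so here is a minimal sketch (not taken from the module's source) of how the prerequisites can be exercised from a Python shell: the `config_file`, `keyring_file` and `rados_id` values mirror the example job configuration, and the `mon_command` call is an assumption about how per-pool data might be gathered.

```python
# Minimal sketch, assuming python3-rados is installed and the keyring is
# group-readable by the user running this script (for netdata: the netdata user).
import json
import rados

cluster = rados.Rados(
    conffile='/etc/ceph/ceph.conf',                            # config_file option
    conf={'keyring': '/etc/ceph/ceph.client.admin.keyring'},   # keyring_file option
    rados_id='admin',                                          # rados_id option (default: admin)
)
try:
    cluster.connect()
    # Cluster-wide usage, comparable to ceph.general_usage / ceph.general_objects:
    # keys include kb, kb_used, kb_avail and num_objects.
    print(cluster.get_cluster_stats())
    # Per-pool figures come back as JSON from monitor commands such as 'df'
    # (an assumption about the data source, shown for illustration only).
    ret, out, err = cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), b'')
    if ret == 0:
        df = json.loads(out)
        print([pool['name'] for pool in df.get('pools', [])])
finally:
    cluster.shutdown()
```

If the script fails with a permission error on the keyring, re-check the `chmod 640` step and the group membership of the user running the collector.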