 collectors/python.d.plugin/ipfs/Makefile.inc  |  13 ++
 collectors/python.d.plugin/ipfs/README.md     |  51 ++++++
 collectors/python.d.plugin/ipfs/ipfs.chart.py | 149 +++++++++++++++
 collectors/python.d.plugin/ipfs/ipfs.conf     |  82 +++++++++
 4 files changed, 295 insertions(+), 0 deletions(-)
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
new file mode 100644
index 0000000..68458cb
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ipfs/ipfs.chart.py
+dist_pythonconfig_DATA += ipfs/ipfs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 100644
index 0000000..4d3b0ec
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1,51 @@
+<!--
+title: "IPFS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md
+sidebar_label: "IPFS"
+-->
+
+# IPFS monitoring with Netdata
+
+Collects basic [`IPFS`](https://ipfs.io) metrics, such as file system bandwidth, peers, and repository statistics.
+
+## Charts
+
+It produces the following charts:
+
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- Repo Size in `GiB`
+- Repo Objects in `objects`
+
+## Configuration
+
+Edit the `python.d/ipfs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ipfs.conf
+```
+
+---
+
+Calls to the following endpoints are disabled by default due to `IPFS` bugs:
+
+- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/7528)
+- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/3874)
+
+They can be re-enabled per job with `repoapi: yes` and `pinapi: yes` in the collector configuration file.
+
+Only the `url` of the `IPFS` API server needs to be configured. Here is an example for two `IPFS` instances:
+
+```yaml
+localhost:
+  url: 'http://localhost:5001'
+
+remote:
+  url: 'http://203.0.113.10:5001'
+```
+
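+To verify that a job's `url` is reachable, you can query one of the endpoints
+the collector polls yourself. Below is a minimal sketch (not part of the
+collector), assuming a local daemon on port 5001; note that the IPFS HTTP API
+expects `POST` requests, which is why the collector uses that method:
+
+```python
+import json
+import urllib.request
+
+# /api/v0/stats/bw is the bandwidth endpoint the collector polls
+req = urllib.request.Request('http://localhost:5001/api/v0/stats/bw', method='POST')
+with urllib.request.urlopen(req) as resp:
+    stats = json.load(resp)
+
+# RateIn/RateOut are bytes per second; the chart scales them to kilobits/s
+print(stats['RateIn'], stats['RateOut'])
+```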
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
new file mode 100644
index 0000000..abfc9c4
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+    'bandwidth',
+    'peers',
+    'repo_size',
+    'repo_objects',
+]
+
+CHARTS = {
+    'bandwidth': {
+        'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+        'lines': [
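+            # raw rates are bytes/s: the multiplier 8 and divisor 1000 convert to
+            # kilobits/s, and the negative multiplier draws 'out' below the zero axis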
+            ['in', None, 'absolute', 8, 1000],
+            ['out', None, 'absolute', -8, 1000]
+        ]
+    },
+    'peers': {
+        'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+        'lines': [
+            ['peers', None, 'absolute']
+        ]
+    },
+    'repo_size': {
+        'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
+        'lines': [
+            ['avail', None, 'absolute', 1, 1 << 30],
+            ['size', None, 'absolute', 1, 1 << 30],
+        ]
+    },
+    'repo_objects': {
+        'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+        'lines': [
+            ['objects', None, 'absolute', 1, 1],
+            ['pinned', None, 'absolute', 1, 1],
+            ['recursive_pins', None, 'absolute', 1, 1]
+        ]
+    }
+}
+
+SI_zeroes = {
+    'k': 3,
+    'm': 6,
+    'g': 9,
+    't': 12,
+    'p': 15,
+    'e': 18,
+    'z': 21,
+    'y': 24
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+        self.method = "POST"
+        self.do_pinapi = self.configuration.get('pinapi')
+        self.do_repoapi = self.configuration.get('repoapi')
+        self.__storage_max = None
+
+    def _get_json(self, sub_url):
+        """
+        :return: json decoding of the specified url
+        """
+        self.url = self.baseurl + sub_url
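+        # _get_raw_data() returns None when the request fails, so json.loads(None)
+        # raises TypeError, which is handled below by returning an empty dict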
+        try:
+            return json.loads(self._get_raw_data())
+        except (TypeError, ValueError):
+            return dict()
+
+    @staticmethod
+    def _recursive_pins(keys):
+        # 'Keys' maps each pin hash to a dict like {'Type': 'recursive'}; JSON
+        # decoding yields str, so compare against 'recursive' (b'recursive' never matches)
+        return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
+
+    @staticmethod
+    def _dehumanize(store_max):
+        # convert from '10Gb' to 10000000000
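+        # e.g. '10gb' -> val '10', units 'g' -> '10' plus nine zeros -> 10000000000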
+        if not isinstance(store_max, int):
+            store_max = store_max.lower()
+            if store_max.endswith('b'):
+                val, units = store_max[:-2], store_max[-2]
+                if units in SI_zeroes:
+                    val += '0' * SI_zeroes[units]
+                store_max = val
+            try:
+                store_max = int(store_max)
+            except (TypeError, ValueError):
+                store_max = None
+        return store_max
+
+    def _storagemax(self, store_cfg):
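+        # parse StorageMax once and cache it; the configured limit does not change at runtime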
+        if self.__storage_max is None:
+            self.__storage_max = self._dehumanize(store_cfg)
+        return self.__storage_max
+
+    def _get_data(self):
+        """
+        Get data from API
+        :return: dict
+        """
+        # suburl : List of (result-key, original-key, transform-func)
+        cfg = {
+            '/api/v0/stats/bw':
+                [
+                    ('in', 'RateIn', int),
+                    ('out', 'RateOut', int),
+                ],
+            '/api/v0/swarm/peers':
+                [
+                    ('peers', 'Peers', len),
+                ],
+        }
+        if self.do_repoapi:
+            cfg.update({
+                '/api/v0/stats/repo':
+                    [
+                        ('size', 'RepoSize', int),
+                        ('objects', 'NumObjects', int),
+                        ('avail', 'StorageMax', self._storagemax),
+                    ],
+            })
+
+        if self.do_pinapi:
+            cfg.update({
+                '/api/v0/pin/ls':
+                    [
+                        ('pinned', 'Keys', len),
+                        ('recursive_pins', 'Keys', self._recursive_pins),
+                    ]
+            })
+        r = dict()
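+        # poll each configured endpoint; a key missing from a response is skipped
+        # and logged at debug level, so partial results are still returned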
+        for suburl in cfg:
+            in_json = self._get_json(suburl)
+            for new_key, orig_key, xmute in cfg[suburl]:
+                try:
+                    r[new_key] = xmute(in_json[orig_key])
+                except Exception as error:
+                    self.debug(error)
+        return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
new file mode 100644
index 0000000..8b167b3
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YAML format. Generally, the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     penalty: yes            # the JOB's penalty
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, ipfs also supports the following:
+#
+# url: 'URL'                  # URL to the IPFS API
+# repoapi: no                 # Collect repo metrics
+#                             # Currently defaults to disabled due to an IPFS bug
+#                             # https://github.com/ipfs/go-ipfs/issues/7528
+#                             # resulting in very high CPU usage
+# pinapi: no                  # Enable polling of IPFS pinned objects
+#                             # Currently defaults to disabled due to an IPFS bug
+#                             # https://github.com/ipfs/go-ipfs/issues/3874
+#                             # resulting in very high CPU usage
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name: 'local'
+  url: 'http://localhost:5001'
+  repoapi: no
+  pinapi: no
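+
+# As an illustration (commented out), a second job could poll a remote node,
+# optionally re-enabling the repo and pin APIs; the address below is the
+# documentation example from the README, not a real endpoint.
+#
+# remote:
+#   name: 'remote'
+#   url: 'http://203.0.113.10:5001'
+#   repoapi: yes
+#   pinapi: yes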