From 50485bedfd9818165aa1d039d0abe95a559134b7 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 8 Feb 2019 08:31:03 +0100
Subject: Merging upstream version 1.12.0.

Signed-off-by: Daniel Baumann
---
 collectors/python.d.plugin/ipfs/README.md     |  2 ++
 collectors/python.d.plugin/ipfs/ipfs.chart.py | 36 +++++++++++----------------
 collectors/python.d.plugin/ipfs/ipfs.conf     | 10 +++-----
 3 files changed, 20 insertions(+), 28 deletions(-)

(limited to 'collectors/python.d.plugin/ipfs')

diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index a30649a5f..a83920370 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -23,3 +23,5 @@ localhost:
 
 
 ---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 3f6794e48..8c89b4be1 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -7,25 +7,17 @@ import json
 
 from bases.FrameworkServices.UrlService import UrlService
 
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
 
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-#     'update_every': update_every,
-#     'retries': retries,
-#     'priority': priority,
-#     'url': 'http://localhost:5001'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
+ORDER = [
+    'bandwidth',
+    'peers',
+    'repo_size',
+    'repo_objects',
+]
 
 CHARTS = {
     'bandwidth': {
-        'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+        'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
         'lines': [
             ['in', None, 'absolute', 8, 1000],
             ['out', None, 'absolute', -8, 1000]
@@ -38,10 +30,10 @@ CHARTS = {
         ]
     },
     'repo_size': {
-        'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
+        'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
         'lines': [
-            ['avail', None, 'absolute', 1, 1e9],
-            ['size', None, 'absolute', 1, 1e9],
+            ['avail', None, 'absolute', 1, 1 << 30],
+            ['size', None, 'absolute', 1, 1 << 30],
         ]
     },
     'repo_objects': {
@@ -69,11 +61,11 @@ SI_zeroes = {
 class Service(UrlService):
     def __init__(self, configuration=None, name=None):
         UrlService.__init__(self, configuration=configuration, name=name)
-        self.baseurl = self.configuration.get('url', 'http://localhost:5001')
         self.order = ORDER
         self.definitions = CHARTS
-        self.__storage_max = None
+        self.baseurl = self.configuration.get('url', 'http://localhost:5001')
         self.do_pinapi = self.configuration.get('pinapi')
+        self.__storage_max = None
 
     def _get_json(self, sub_url):
         """
@@ -135,6 +127,6 @@ class Service(UrlService):
             for new_key, orig_key, xmute in cfg[suburl]:
                 try:
                     r[new_key] = xmute(in_json[orig_key])
-                except Exception:
-                    continue
+                except Exception as error:
+                    self.debug(error)
         return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index e3df0f6bb..c7e186487 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -27,11 +27,9 @@
 # If unset, the default for python.d.plugin is used.
 # priority: 60000
 
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
 
 # autodetection_retry sets the job re-check interval in seconds.
 # The job is not deleted if check fails.
@@ -58,7 +56,7 @@
 #                             # JOBs sharing a name are mutually exclusive
 #     update_every: 1         # the JOB's data collection frequency
 #     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
+#     penalty: yes            # the JOB's penalty
 #     autodetection_retry: 0  # the JOB's re-check interval in seconds
 #
 # Additionally to the above, ipfs also supports the following:
--
cgit v1.2.3