Diffstat (limited to 'src/collectors/python.d.plugin/example/example.conf')
 src/collectors/python.d.plugin/example/example.conf | 87 ----------
 1 file changed, 0 insertions(+), 87 deletions(-)
diff --git a/src/collectors/python.d.plugin/example/example.conf b/src/collectors/python.d.plugin/example/example.conf
deleted file mode 100644
index 31261b84..00000000
--- a/src/collectors/python.d.plugin/example/example.conf
+++ /dev/null
@@ -1,87 +0,0 @@
-# netdata python.d.plugin configuration for example
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply a penalty to update_every in case of failures.
-# The penalty increases every 5 failed updates in a row. The maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if the check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear on the dashboard
-# # (by default this is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, the example module also supports the following:
-#
-# num_lines: 4 # the number of lines to create
-# lower: 0 # the lower bound of numbers to randomly sample from
-# upper: 100 # the upper bound of numbers to randomly sample from
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-four_lines:
- name: "Four Lines" # the JOB's name as it will appear on the dashboard
- update_every: 1 # the JOB's data collection frequency
- priority: 60000 # the JOB's order on the dashboard
- penalty: yes # the JOB's penalty
- autodetection_retry: 0 # the JOB's re-check interval in seconds
- num_lines: 4 # the number of lines to create
- lower: 0 # the lower bound of numbers to randomly sample from
- upper: 100 # the upper bound of numbers to randomly sample from
-
-# To run another job in addition to the one above, uncomment the job
-# configuration below.
-# two_lines:
-# name: "Two Lines" # the JOB's name as it will appear on the dashboard
-# num_lines: 2 # the number of lines to create
-# lower: 50 # the lower bound of numbers to randomly sample from
-# upper: 75 # the upper bound of numbers to randomly sample from
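
For reference, a further auto-detection job could have been added alongside four_lines simply by giving it a distinct name. The sketch below is illustrative only: the job name and values are made up for this example and were not part of the deleted file. It combines the predefined JOB parameters with the module-specific num_lines, lower and upper:

ten_slow_lines:              # hypothetical job, not from the original example.conf
  name: "Ten Slow Lines"     # must differ from other jobs' names to run concurrently
  update_every: 5            # overrides the global data collection frequency
  autodetection_retry: 30    # retry the check every 30 seconds if it fails
  num_lines: 10              # the number of lines to create
  lower: 10                  # the lower bound of numbers to randomly sample from
  upper: 20                  # the upper bound of numbers to randomly sample from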