# netdata python.d.plugin configuration for zscores
#
# This file is in YAML format. Generally the format is:
#
# name: value
#
# There are 2 sections:
#  - global variables
#  - one or more JOBS
#
# JOBS allow you to collect values from multiple sources.
# Each source will have its own set of charts.
#
# JOB parameters have to be indented (using spaces only, example below).

# ----------------------------------------------------------------------
# Global Variables
# These variables set the defaults for all JOBs; however, each JOB
# may define its own, overriding the defaults.

# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
update_every: 5

# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
# If unset, the default for python.d.plugin is used.
# priority: 60000

# penalty indicates whether to apply a penalty to update_every in case of failures.
# The penalty increases every 5 consecutive failed updates, up to a maximum of 10 minutes.
# penalty: yes

# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if the check fails.
# Attempts to start the job are made once every autodetection_retry.
# This feature is disabled by default.
# autodetection_retry: 0

# ----------------------------------------------------------------------
# JOBS (data collection sources)
#
# The default JOBS share the same *name*. JOBS with the same name
# are mutually exclusive. Only one of them will be allowed to run at
# any time. This allows autodetection to try several alternatives and
# pick the one that works.
#
# Any number of jobs is supported.
#
# All python.d.plugin JOBS (for all its modules) support a set of
# predefined parameters. These are:
#
# job_name:
#     name: myname            # the JOB's name as it will appear on the
#                             # dashboard (by default it is the job_name)
#                             # JOBs sharing a name are mutually exclusive
#     update_every: 1         # the JOB's data collection frequency
#     priority: 60000         # the JOB's order on the dashboard
#     penalty: yes            # the JOB's penalty
#     autodetection_retry: 0  # the JOB's re-check interval in seconds
#
# In addition to the above, the zscores module also supports the following
# (each is documented inline in the job definition below):
#
# - host, charts_regex, charts_to_exclude, train_secs, offset_secs,
#   train_every_n, z_smooth_n, z_clip, z_abs, burn_in, mode, per_chart_agg
#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)

local:
    name: 'local'

    # what host to pull data from
    host: '127.0.0.1:19999'

    # what charts to pull data for - a regex like 'system\..*' or 'system\..*|apps.cpu|apps.mem' etc.
    charts_regex: 'system\..*'
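    # As an illustration, on a default Netdata install 'system\..*' matches
    # charts such as system.cpu, system.load and system.ram.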

    # Charts to exclude, useful if you would like to exclude some specific charts.
    # Note: this should be a ',' separated string like 'chart.name,chart.name'.
    charts_to_exclude: 'system.uptime'

    # length of time (in seconds) on which to base the mean and stddev calculations
    train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore
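    # As a rough sketch given the settings in this file: with update_every: 5,
    # a 14400 second window corresponds to about 14400 / 5 = 2880 collected
    # points per dimension.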

    # offset preceding the latest data to ignore when calculating the mean and stddev
    offset_secs: 300 # ignore the last 5 minutes of data when calculating the mean and stddev

    # recalculate the mean and stddev every n steps of the collector
    train_every_n: 900 # recalculate the mean and stddev every 900 steps (75 minutes at update_every: 5)

    # smooth the z score by averaging it over the last n values
    z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes'

    # cap the absolute value of the zscore (before smoothing) for better stability
    z_clip: 10 # cap each zscore at 10 to avoid very large individual zscores swamping the rolling average
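    # Worked example, assuming the usual zscore formula z = (value - mean) / stddev:
    # with a trained mean of 100 and stddev of 10, a raw value of 250 gives
    # z = (250 - 100) / 10 = 15, which z_clip caps at 10 before smoothing.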

    # set z_abs: 'true' to make all zscores absolute values only.
    z_abs: 'true'

    # burn-in period during which the mean and stddev are recalculated on every step
    burn_in: 2 # during collector startup, keep updating the mean and stddev in case of data gaps or failed initial calculations

    # mode sets whether to compute a zscore 'per_dim' or 'per_chart'
    mode: 'per_chart' # 'per_chart' means the individual dimension-level smoothed zscores are aggregated into one zscore per chart per time step

    # per_chart_agg is how to aggregate from dimension to chart when mode='per_chart'
    per_chart_agg: 'mean' # 'absmax' takes the max absolute value across all dimensions while keeping its sign; 'mean' simply averages them.
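    # For example, given smoothed dimension zscores of -4, 2 and 1 on a chart,
    # 'absmax' would report -4 (largest absolute value, sign kept) while
    # 'mean' would report (-4 + 2 + 1) / 3 = -0.33 (approximately).

# As a sketch only (commented out, and the job name 'local_per_dim' is
# illustrative): a second job with a different name can run alongside 'local',
# e.g. one that keeps a separate zscore per dimension.
#
# local_per_dim:
#     name: 'local_per_dim'
#     host: '127.0.0.1:19999'
#     charts_regex: 'system\..*'
#     mode: 'per_dim'
#     z_abs: 'true'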