author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 14:31:17 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 14:31:17 +0000
commit     8020f71afd34d7696d7933659df2d763ab05542f (patch)
tree       2fdf1b5447ffd8bdd61e702ca183e814afdcb4fc /web/api
parent     Initial commit. (diff)
Adding upstream version 1.37.1. (tags: upstream/1.37.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'web/api')
-rw-r--r--  web/api/Makefile.am | 21
-rw-r--r--  web/api/README.md | 12
-rw-r--r--  web/api/badges/Makefile.am | 8
-rw-r--r--  web/api/badges/README.md | 365
-rw-r--r--  web/api/badges/web_buffer_svg.c | 1156
-rw-r--r--  web/api/badges/web_buffer_svg.h | 18
-rw-r--r--  web/api/exporters/Makefile.am | 13
-rw-r--r--  web/api/exporters/README.md | 10
-rw-r--r--  web/api/exporters/allmetrics.c | 133
-rw-r--r--  web/api/exporters/allmetrics.h | 12
-rw-r--r--  web/api/exporters/prometheus/Makefile.am | 8
-rw-r--r--  web/api/exporters/prometheus/README.md | 10
-rw-r--r--  web/api/exporters/shell/Makefile.am | 8
-rw-r--r--  web/api/exporters/shell/README.md | 69
-rw-r--r--  web/api/exporters/shell/allmetrics_shell.c | 169
-rw-r--r--  web/api/exporters/shell/allmetrics_shell.h | 21
-rw-r--r--  web/api/formatters/Makefile.am | 15
-rw-r--r--  web/api/formatters/README.md | 78
-rw-r--r--  web/api/formatters/charts2json.c | 191
-rw-r--r--  web/api/formatters/charts2json.h | 12
-rw-r--r--  web/api/formatters/csv/Makefile.am | 8
-rw-r--r--  web/api/formatters/csv/README.md | 144
-rw-r--r--  web/api/formatters/csv/csv.c | 142
-rw-r--r--  web/api/formatters/csv/csv.h | 12
-rw-r--r--  web/api/formatters/json/Makefile.am | 8
-rw-r--r--  web/api/formatters/json/README.md | 156
-rw-r--r--  web/api/formatters/json/json.c | 283
-rw-r--r--  web/api/formatters/json/json.h | 10
-rw-r--r--  web/api/formatters/json_wrapper.c | 423
-rw-r--r--  web/api/formatters/json_wrapper.h | 15
-rw-r--r--  web/api/formatters/rrd2json.c | 339
-rw-r--r--  web/api/formatters/rrd2json.h | 84
-rw-r--r--  web/api/formatters/rrdset2json.c | 155
-rw-r--r--  web/api/formatters/rrdset2json.h | 10
-rw-r--r--  web/api/formatters/ssv/Makefile.am | 8
-rw-r--r--  web/api/formatters/ssv/README.md | 59
-rw-r--r--  web/api/formatters/ssv/ssv.c | 45
-rw-r--r--  web/api/formatters/ssv/ssv.h | 10
-rw-r--r--  web/api/formatters/value/Makefile.am | 8
-rw-r--r--  web/api/formatters/value/README.md | 24
-rw-r--r--  web/api/formatters/value/value.c | 162
-rw-r--r--  web/api/formatters/value/value.h | 31
-rw-r--r--  web/api/health/Makefile.am | 8
-rw-r--r--  web/api/health/README.md | 225
-rw-r--r--  web/api/health/health_cmdapi.c | 206
-rw-r--r--  web/api/health/health_cmdapi.h | 31
-rw-r--r--  web/api/netdata-swagger.json | 3288
-rw-r--r--  web/api/netdata-swagger.yaml | 2614
-rw-r--r--  web/api/queries/Makefile.am | 23
-rw-r--r--  web/api/queries/README.md | 176
-rw-r--r--  web/api/queries/average/Makefile.am | 8
-rw-r--r--  web/api/queries/average/README.md | 46
-rw-r--r--  web/api/queries/average/average.c | 59
-rw-r--r--  web/api/queries/average/average.h | 15
-rw-r--r--  web/api/queries/countif/Makefile.am | 8
-rw-r--r--  web/api/queries/countif/README.md | 36
-rw-r--r--  web/api/queries/countif/countif.c | 136
-rw-r--r--  web/api/queries/countif/countif.h | 15
-rw-r--r--  web/api/queries/des/Makefile.am | 8
-rw-r--r--  web/api/queries/des/README.md | 73
-rw-r--r--  web/api/queries/des/des.c | 137
-rw-r--r--  web/api/queries/des/des.h | 17
-rw-r--r--  web/api/queries/incremental_sum/Makefile.am | 8
-rw-r--r--  web/api/queries/incremental_sum/README.md | 41
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.c | 66
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.h | 15
-rw-r--r--  web/api/queries/max/Makefile.am | 8
-rw-r--r--  web/api/queries/max/README.md | 38
-rw-r--r--  web/api/queries/max/max.c | 57
-rw-r--r--  web/api/queries/max/max.h | 15
-rw-r--r--  web/api/queries/median/Makefile.am | 8
-rw-r--r--  web/api/queries/median/README.md | 60
-rw-r--r--  web/api/queries/median/median.c | 140
-rw-r--r--  web/api/queries/median/median.h | 23
-rw-r--r--  web/api/queries/min/Makefile.am | 8
-rw-r--r--  web/api/queries/min/README.md | 38
-rw-r--r--  web/api/queries/min/min.c | 57
-rw-r--r--  web/api/queries/min/min.h | 15
-rw-r--r--  web/api/queries/percentile/Makefile.am | 8
-rw-r--r--  web/api/queries/percentile/README.md | 58
-rw-r--r--  web/api/queries/percentile/percentile.c | 169
-rw-r--r--  web/api/queries/percentile/percentile.h | 23
-rw-r--r--  web/api/queries/query.c | 2175
-rw-r--r--  web/api/queries/query.h | 59
-rw-r--r--  web/api/queries/rrdr.c | 101
-rw-r--r--  web/api/queries/rrdr.h | 152
-rw-r--r--  web/api/queries/ses/Makefile.am | 8
-rw-r--r--  web/api/queries/ses/README.md | 61
-rw-r--r--  web/api/queries/ses/ses.c | 90
-rw-r--r--  web/api/queries/ses/ses.h | 17
-rw-r--r--  web/api/queries/stddev/Makefile.am | 8
-rw-r--r--  web/api/queries/stddev/README.md | 93
-rw-r--r--  web/api/queries/stddev/stddev.c | 173
-rw-r--r--  web/api/queries/stddev/stddev.h | 18
-rw-r--r--  web/api/queries/sum/Makefile.am | 8
-rw-r--r--  web/api/queries/sum/README.md | 41
-rw-r--r--  web/api/queries/sum/sum.c | 55
-rw-r--r--  web/api/queries/sum/sum.h | 15
-rw-r--r--  web/api/queries/trimmed_mean/Makefile.am | 8
-rw-r--r--  web/api/queries/trimmed_mean/README.md | 56
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.c | 166
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.h | 22
-rw-r--r--  web/api/queries/weights.c | 1107
-rw-r--r--  web/api/queries/weights.h | 33
-rw-r--r--  web/api/tests/valid_urls.c | 789
-rw-r--r--  web/api/tests/web_api.c | 473
-rw-r--r--  web/api/web_api_v1.c | 1707
-rw-r--r--  web/api/web_api_v1.h | 41
108 files changed, 19925 insertions, 0 deletions
diff --git a/web/api/Makefile.am b/web/api/Makefile.am
new file mode 100644
index 0000000..7255ac8
--- /dev/null
+++ b/web/api/Makefile.am
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ badges \
+ queries \
+ exporters \
+ formatters \
+ health \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_web_DATA = \
+ netdata-swagger.yaml \
+ netdata-swagger.json \
+ $(NULL)
diff --git a/web/api/README.md b/web/api/README.md
new file mode 100644
index 0000000..fc520a0
--- /dev/null
+++ b/web/api/README.md
@@ -0,0 +1,12 @@
+<!--
+title: "API"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/README.md
+-->
+
+# API
+
+## Netdata REST API
+
+The complete documentation of the Netdata API is available as a Swagger API document [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml). You can view it online using the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
+
+If you prefer it over the Swagger Editor, you can also use [Swagger UI](https://github.com/swagger-api/swagger-ui) by pointing it at `web/api/netdata-swagger.yaml` in the Netdata source tree (or at https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml if you want to use the Swagger API definitions directly from our GitHub repository). Note, however, that this does not provide all the information available.
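+
+If you want a self-hosted view of the API documentation, a minimal HTML page like the one below can render it with Swagger UI. This is only a sketch: it assumes the `swagger-ui-dist` package is available on the unpkg CDN under these paths.
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <!-- Swagger UI stylesheet, served from the unpkg CDN (assumed path) -->
+  <link rel="stylesheet" href="https://unpkg.com/swagger-ui-dist/swagger-ui.css" />
+</head>
+<body>
+  <div id="swagger-ui"></div>
+  <script src="https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js"></script>
+  <script>
+    // point Swagger UI at the Netdata API definition
+    SwaggerUIBundle({
+      url: "https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml",
+      dom_id: "#swagger-ui"
+    });
+  </script>
+</body>
+</html>
+```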
diff --git a/web/api/badges/Makefile.am b/web/api/badges/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/badges/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/badges/README.md b/web/api/badges/README.md
new file mode 100644
index 0000000..8440947
--- /dev/null
+++ b/web/api/badges/README.md
@@ -0,0 +1,365 @@
+<!--
+title: "Netdata badges"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/badges/README.md
+-->
+
+# Netdata badges
+
+**Badges are cool!**
+
+Netdata can generate badges for any chart and any dimension at any time-frame. Badges come in `SVG` and can be added to any web page using an `<IMG>` HTML tag.
+
+**Netdata badges are powerful**!
+
+Given that Netdata collects from **1,000** to **5,000** metrics per server (depending on the number of network interfaces, disks, CPU cores, applications running, users logged in, containers running, etc.) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful.
+
+For each metric/dimension and for arbitrary time-frames badges can show **min**, **max** or **average** value, but also **sum** or **incremental-sum** to have their **volume**.
+
+For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc):
+
+<a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&value_color=grey:null%7Cblue&label=nginx%20active%20connections%20now&units=null&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&after=-3600&value_color=orange&label=last%20hour%20average&units=null&options=unaligned&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&group=max&after=-3600&value_color=red&label=last%20hour%20max&units=null&options=unaligned&precision=0"/></a>
+
+Similarly, there is [a chart that shows outbound bandwidth per class](http://london.my-netdata.io/#tc_eth0), using QoS data. So it shows `kilobits/s` per class. Using this chart we can show:
+
+<a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&value_color=green&label=web%20server%20sends%20now&units=kbps"/></a> <a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&after=-86400&options=unaligned&group=sum&divide=8388608&value_color=blue&label=web%20server%20sent%20today&units=GB"/></a>
+
+The right one is a **volume** calculation. Netdata calculated the total of the last 86,400 seconds (a day), which gives `kilobits`, then divided it by 8 to make it kilobytes, then by 1024 to make it MB and then by 1024 again to make it GB (hence `divide=8388608`, i.e. 8 × 1024 × 1024). Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to the second boundary using microsecond calculations.
+
+Let's see a few more badge examples (they come from the [Netdata registry](/registry/README.md)):
+
+- **CPU usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define the thresholds and colors in the URL).
+
+ <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img></a> <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&after=-3600&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20average%20cpu%20last%20hour&units=%25"></img></a>
+
+- **mysql queries per second**
+
+ <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&label=mysql%20queries%20now&value_color=red&units=%5Cs"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-3600&options=unaligned&group=sum&label=mysql%20queries%20this%20hour&value_color=green&units=null"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-86400&options=unaligned&group=sum&label=mysql%20queries%20today&value_color=blue&units=null"></img></a>
+
+  And a niche one: **MySQL `SELECT` statements with `JOIN`, which did full table scans**:
+
+ <a href="https://registry.my-netdata.io/#mysql_local_issues"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.join_issues&dimensions=scan&after=-3600&label=full%20table%20scans%20the%20last%20hour&value_color=orange&group=sum&units=null"></img></a>
+
+---
+
+> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/) can become a badge, and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! You can also vary the badge color using conditions on the calculated value.
+
+---
+
+## How to create badges
+
+The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`.
+
+Here is what you can put for `options` (these are standard Netdata API options):
+
+- `chart=CHART.NAME`
+
+ The chart to get the values from.
+
+ **This is the only parameter required** and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions.
+
+ Example:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
+ </a>
+```
+
+ Which produces this:
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
+ </a>
+
+- `alarm=NAME`
+
+ Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm.
+
+ The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm.
+
+ For alarm badges, **both `chart` and `alarm` parameters are required**.
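+
+  Example (a sketch only: it assumes an alarm named `10min_cpu_usage` is configured on the `system.cpu` chart; substitute an alarm that actually exists on your node):
+
+```html
+  <a href="#">
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&alarm=10min_cpu_usage"></img>
+  </a>
+```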
+
+- `dimensions=DIMENSION1|DIMENSION2|...`
+
+ The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them.
+
+  Pipes in HTML have to be escaped with `%7C`.
+
+ Example:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
+ </a>
+```
+
+ Which produces this:
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
+ </a>
+
+- `before=SECONDS` and `after=SECONDS`
+
+  The timeframe. These can be absolute Unix timestamps, or negative numbers of seconds relative to now. By default `before=0` and `after=-1` (1 second in the past).
+
+ To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59).
+
+  To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59).
+
+ Example:
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img>
+ </a>
+```
+
+ Which produces the average of last complete minute (XX:XX:00 - XX:XX:59):
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img>
+ </a>
+
+ While this is the previous minute (one minute before the last one, again aligned XX:XX:00 - XX:XX:59):
+
+```html
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
+ </a>
+```
+
+ It produces this:
+
+ <a href="#">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
+ </a>
+
+- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum`
+
+  When Netdata has to reduce (aggregate) the data to calculate the value, this selects the aggregation method to use.
+
+ - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value.
+
+ - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero.
+
+ - `average` will calculate the average value for the timeframe.
+
+ - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe.
+
+ - `incremental-sum` will sum the difference of each value to its next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1).
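+
+  Example (a sketch comparing two aggregation methods on the same chart and timeframe):
+
+```html
+  <a href="#">
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-600&group=average&label=cpu%20average"></img>
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-600&group=max&label=cpu%20max"></img>
+  </a>
+```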
+
+- `options=opt1|opt2|opt3|...`
+
+  These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges):
+
+ - `percentage`, instead of returning the value, calculate the percentage of the sum of the selected dimensions, versus the sum of all the dimensions of the chart. This also sets the units to `%`.
+
+ - `absolute` or `abs`, turn all values positive and then sum them.
+
+ - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge.
+
+ - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`.
+
+ - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so average per minute returns always XX:XX:00 - XX:XX:59). Setting the `unaligned` option, Netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data.
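+
+  Example (a sketch: with `percentage`, the value becomes the share of the `user` and `system` dimensions in the whole chart, and the units become `%`):
+
+```html
+  <a href="#">
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=user%7Csystem&options=percentage%7Cunaligned&label=user%20and%20system%20share"></img>
+  </a>
+```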
+
+These are options dedicated to badges:
+
+- `label=TEXT`
+
+ The label of the badge.
+
+- `units=TEXT`
+
+  The units of the badge. If you want to put a `/`, use a `\` instead. This is because Netdata allows badge parameters to be given as a path in the URL, instead of a query string. You can also use `null` or `empty` to show the badge without any units.
+
+  The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically change it to show a prettier duration.
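+
+  Example (a sketch, assuming your node exposes the `system.uptime` chart, which reports seconds):
+
+```html
+  <a href="#">
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.uptime&units=seconds&label=uptime"></img>
+  </a>
+```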
+
+- `multiply=NUMBER`
+
+  Multiply the value by this number. The default is `1`.
+
+- `divide=NUMBER`
+
+  Divide the value by this number. The default is `1`.
+
+- Color customization parameters
+
+ The following parameters specify colors of each individual part of the badge. Each parameter is documented in detail
+ below.
+
+ | Area of badge | Background color parameter | Text color parameter |
+ | ---: | :------------------------: | :------------------: |
+ | Label (left) part | `label_color` | `text_color_lbl` |
+ | Value (right) part | `value_color` | `text_color_val` |
+
+ - `label_color=COLOR`
+
+ The color of the label (the left part). You can use any HTML color in `RGB` or `RRGGBB` hex notation (without
+ the `#` character at the beginning). Additionally, you can use one of the following predefined colors (and you
+ can use them by their name):
+
+ - `green`
+ - `brightgreen`
+ - `yellow`
+ - `yellowgreen`
+ - `orange`
+ - `red`
+ - `blue`
+ - `grey`
+ - `gray`
+ - `lightgrey`
+ - `lightgray`
+
+ These colors are taken from <https://github.com/badges/shields>, which makes them compatible with standard
+ badges.
+
+ - `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...`
+
+ You can add a pipe delimited list of conditions to pick the value color. The first matching (left to right) will
+ be used.
+
+ Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red`
+
+ The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in
+ `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and
+ so on. Netdata will use `red` if no other conditions match. Only integers are supported as values.
+
+ The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`), and `!=` (or `<>`).
+
+ You can also use the same syntax as the `label_color` parameter to define each of these colors. You can
+ reference a predefined color by name or `RGB`/`RRGGBB` hex notation.
+
+ - `text_color_lbl=RGB` or `text_color_lbl=RRGGBB` or `text_color_lbl=color_by_name`
+
+ This value specifies the font color for the font of left/label side of the badge. The syntax is the same as the
+ `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color.
+
+ - `text_color_val=RGB` or `text_color_val=RRGGBB` or `text_color_lbl=color_by_name`
+
+ This value specifies the font color for the font of right/value side of the badge. The syntax is the same as the
+ `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color.
+
+- `precision=NUMBER`
+
+ The number of decimal digits of the value. By default Netdata will add:
+
+ - no decimal digits for values > 1000
+ - 1 decimal digit for values > 100
+ - 2 decimal digits for values > 1
+ - 3 decimal digits for values > 0.1
+ - 4 decimal digits for values \<= 0.1
+
+  Using `precision=NUMBER` you can set your preference per badge.
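+
+  Example (a sketch showing the same value with automatic precision and with no decimal digits):
+
+```html
+  <a href="#">
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&label=auto%20precision"></img>
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&precision=0&label=no%20decimals"></img>
+  </a>
+```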
+
+- `scale=XXX`
+
+  This option scales the SVG image. It accepts values greater than or equal to 100 (100% is the default scale). For example, let's get a few different sizes:
+
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=100"></img> original<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=125"></img> `scale=125`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=150"></img> `scale=150`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=175"></img> `scale=175`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=200"></img> `scale=200`
+
+- `fixed_width_lbl=NUMBER` and `fixed_width_val=NUMBER`
+
+  This parameter overrides the auto-sizing of badges and displays them at fixed widths. `fixed_width_lbl` determines the width of the label's left side (label/name). `fixed_width_val` determines the width of the label's right side (value). You must set both parameters together, or they will be ignored.
+
+  You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. If the text cannot fit in the given space, it will be clipped.
+
+ The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.
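+
+  Example (a sketch; the pixel widths are arbitrary, chosen only for illustration):
+
+```html
+  <a href="#">
+    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&fixed_width_lbl=120&fixed_width_val=60"></img>
+  </a>
+```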
+
+- `refresh=auto` or `refresh=SECONDS`
+
+ This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals.
+
+ `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default).
+
+  Auto-refreshing like this works only if you access the badge directly. So, you may have to put it in an `embed` or `iframe` for it to be auto-refreshed. Use something like this:
+
+```html
+<embed src="BADGE_URL" type="image/svg+xml" height="20" />
+```
+
+  Another way is to use JavaScript to auto-refresh all the Netdata badges on a page. You have to add a class to all the Netdata badges, like this `<img class="netdata-badge" src="..."/>`. Then add this JavaScript code to your page (it requires jQuery):
+
+```html
+<script>
+ var NETDATA_BADGES_AUTOREFRESH_SECONDS = 5;
+ function refreshNetdataBadges() {
+ var now = new Date().getTime().toString();
+ $('.netdata-badge').each(function() {
+ this.src = this.src.replace(/\&_=\d*/, '') + '&_=' + now;
+ });
+ setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
+ }
+ setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
+</script>
+```
+
+A more advanced badge-refresh method is to include `http://your.netdata.ip:19999/refresh-badges.js` in your page.
+
+---
+
+## Escaping URLs
+
+Keep in mind that if you add badge URLs to your HTML pages you have to escape the special characters:
+
+|character|name|escape sequence|
+|:-------:|:--:|:-------------:|
+|` `|space (in labels and units)|`%20`|
+|`#`|hash (for colors)|`%23`|
+|`%`|percent (in units)|`%25`|
+|`<`|less than|`%3C`|
+|`>`|greater than|`%3E`|
+|`\`|backslash (when you need a `/`)|`%5C`|
+|`\|`|pipe (delimiting parameters)|`%7C`|
+
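+Here is a worked example (a sketch): a badge whose label contains spaces (`%20`), whose dimensions are pipe-delimited (`%7C`), and whose units are a percent sign (`%25`):
+
+```html
+<img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=user%7Csystem&label=user%20and%20system%20cpu&units=%25"></img>
+```
+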
+## FAQ
+
+#### Is it fast?
+
+On modern hardware, Netdata can generate about **2,000 badges per second per core**, before you notice any delays. It generates a badge in about half a millisecond!
+
+Of course these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), timings will differ. Netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge to a busy web site. Of course, you can cache such badges, or have a cron job get them from Netdata and save them on your web server at regular intervals.
+
+#### Embedding badges in GitHub
+
+You have 2 options:
+- SVG images with markdown
+- SVG images with HTML (directly in .md files)
+
+For example, this is the root user CPU badge shown above:
+
+- Markdown example:
+
+```md
+[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu)
+```
+
+- HTML example:
+
+```html
+<a href="https://registry.my-netdata.io/#apps_cpu">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img>
+</a>
+```
+
+Both produce this:
+
+<a href="https://registry.my-netdata.io/#apps_cpu">
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img>
+</a>
+
+#### Auto-refreshing badges in GitHub
+
+Unfortunately it cannot be done. GitHub fetches all the images using a proxy and rewrites all the URLs to be served by the proxy.
+
+You can refresh them from your browser console though. Press F12 to open the web browser console (and switch to the Console tab), paste the following and press Enter. They will refresh:
+
+```js
+var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); };
+```
+
+
diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c
new file mode 100644
index 0000000..080f224
--- /dev/null
+++ b/web/api/badges/web_buffer_svg.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_buffer_svg.h"
+
+#define BADGE_HORIZONTAL_PADDING 4
+#define VERDANA_KERNING 0.2
+#define VERDANA_PADDING 1.0
+
+/*
+ * verdana11_widths[] has been generated with this method:
+ * https://github.com/badges/shields/blob/master/measure-text.js
+*/
+
+static double verdana11_widths[128] = {
+ [0] = 0.0,
+ [1] = 0.0,
+ [2] = 0.0,
+ [3] = 0.0,
+ [4] = 0.0,
+ [5] = 0.0,
+ [6] = 0.0,
+ [7] = 0.0,
+ [8] = 0.0,
+ [9] = 0.0,
+ [10] = 0.0,
+ [11] = 0.0,
+ [12] = 0.0,
+ [13] = 0.0,
+ [14] = 0.0,
+ [15] = 0.0,
+ [16] = 0.0,
+ [17] = 0.0,
+ [18] = 0.0,
+ [19] = 0.0,
+ [20] = 0.0,
+ [21] = 0.0,
+ [22] = 0.0,
+ [23] = 0.0,
+ [24] = 0.0,
+ [25] = 0.0,
+ [26] = 0.0,
+ [27] = 0.0,
+ [28] = 0.0,
+ [29] = 0.0,
+ [30] = 0.0,
+ [31] = 0.0,
+ [32] = 3.8671874999999996, //
+ [33] = 4.3291015625, // !
+ [34] = 5.048828125, // "
+ [35] = 9.001953125, // #
+ [36] = 6.9931640625, // $
+ [37] = 11.837890625, // %
+ [38] = 7.992187499999999, // &
+ [39] = 2.9541015625, // '
+ [40] = 4.9951171875, // (
+ [41] = 4.9951171875, // )
+ [42] = 6.9931640625, // *
+ [43] = 9.001953125, // +
+ [44] = 4.00146484375, // ,
+ [45] = 4.9951171875, // -
+ [46] = 4.00146484375, // .
+ [47] = 4.9951171875, // /
+ [48] = 6.9931640625, // 0
+ [49] = 6.9931640625, // 1
+ [50] = 6.9931640625, // 2
+ [51] = 6.9931640625, // 3
+ [52] = 6.9931640625, // 4
+ [53] = 6.9931640625, // 5
+ [54] = 6.9931640625, // 6
+ [55] = 6.9931640625, // 7
+ [56] = 6.9931640625, // 8
+ [57] = 6.9931640625, // 9
+ [58] = 4.9951171875, // :
+ [59] = 4.9951171875, // ;
+ [60] = 9.001953125, // <
+ [61] = 9.001953125, // =
+ [62] = 9.001953125, // >
+ [63] = 5.99951171875, // ?
+ [64] = 11.0, // @
+ [65] = 7.51953125, // A
+ [66] = 7.541015625, // B
+ [67] = 7.680664062499999, // C
+ [68] = 8.4755859375, // D
+ [69] = 6.95556640625, // E
+ [70] = 6.32177734375, // F
+ [71] = 8.529296875, // G
+ [72] = 8.26611328125, // H
+ [73] = 4.6298828125, // I
+ [74] = 5.00048828125, // J
+ [75] = 7.62158203125, // K
+ [76] = 6.123046875, // L
+ [77] = 9.2705078125, // M
+ [78] = 8.228515625, // N
+ [79] = 8.658203125, // O
+ [80] = 6.63330078125, // P
+ [81] = 8.658203125, // Q
+ [82] = 7.6484375, // R
+ [83] = 7.51953125, // S
+ [84] = 6.7783203125, // T
+ [85] = 8.05126953125, // U
+ [86] = 7.51953125, // V
+ [87] = 10.87646484375, // W
+ [88] = 7.53564453125, // X
+ [89] = 6.767578125, // Y
+ [90] = 7.53564453125, // Z
+ [91] = 4.9951171875, // [
+ [92] = 4.9951171875, // backslash
+ [93] = 4.9951171875, // ]
+ [94] = 9.001953125, // ^
+ [95] = 6.9931640625, // _
+ [96] = 6.9931640625, // `
+ [97] = 6.6064453125, // a
+ [98] = 6.853515625, // b
+ [99] = 5.73095703125, // c
+ [100] = 6.853515625, // d
+ [101] = 6.552734375, // e
+ [102] = 3.8671874999999996, // f
+ [103] = 6.853515625, // g
+ [104] = 6.9609375, // h
+ [105] = 3.0185546875, // i
+ [106] = 3.78662109375, // j
+ [107] = 6.509765625, // k
+ [108] = 3.0185546875, // l
+ [109] = 10.69921875, // m
+ [110] = 6.9609375, // n
+ [111] = 6.67626953125, // o
+ [112] = 6.853515625, // p
+ [113] = 6.853515625, // q
+ [114] = 4.6943359375, // r
+ [115] = 5.73095703125, // s
+ [116] = 4.33447265625, // t
+ [117] = 6.9609375, // u
+ [118] = 6.509765625, // v
+ [119] = 9.001953125, // w
+ [120] = 6.509765625, // x
+ [121] = 6.509765625, // y
+ [122] = 5.779296875, // z
+ [123] = 6.982421875, // {
+ [124] = 4.9951171875, // |
+ [125] = 6.982421875, // }
+ [126] = 9.001953125, // ~
+ [127] = 0.0
+};
+
+// find the width of the string using the verdana 11points font
+static inline double verdana11_width(const char *s, float em_size) {
+ double w = 0.0;
+
+ while(*s) {
+        // if a UTF8 multibyte character is found, guess its width to be 1em;
+        // the label width will be updated with JavaScript anyway, so this is not critical
+
+ // TODO: maybe move UTF8 functions from url.c to separate util in libnetdata
+ // then use url_utf8_get_byte_length etc.
+ if(IS_UTF8_STARTBYTE(*s)) {
+ s++;
+ while(IS_UTF8_BYTE(*s) && !IS_UTF8_STARTBYTE(*s)){
+ s++;
+ }
+ w += em_size;
+ }
+ else {
+ if(likely(!(*s & 0x80))){ // Byte 1XXX XXXX is not valid in UTF8
+ double t = verdana11_widths[(unsigned char)*s];
+ if(t != 0.0)
+ w += t + VERDANA_KERNING;
+ }
+ s++;
+ }
+ }
+
+ w -= VERDANA_KERNING;
+ w += VERDANA_PADDING;
+ return w;
+}
+
+static inline size_t escape_xmlz(char *dst, const char *src, size_t len) {
+ size_t i = len;
+
+ // required escapes from
+ // https://github.com/badges/shields/blob/master/badge.js
+ while(*src && i) {
+ switch(*src) {
+ case '\\':
+ *dst++ = '/';
+ src++;
+ i--;
+ break;
+
+ case '&':
+ if(i > 5) {
+ strcpy(dst, "&amp;");
+ i -= 5;
+ dst += 5;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '<':
+ if(i > 4) {
+ strcpy(dst, "&lt;");
+ i -= 4;
+ dst += 4;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '>':
+ if(i > 4) {
+ strcpy(dst, "&gt;");
+ i -= 4;
+ dst += 4;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '"':
+ if(i > 6) {
+ strcpy(dst, "&quot;");
+ i -= 6;
+ dst += 6;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '\'':
+ if(i > 6) {
+ strcpy(dst, "&apos;");
+ i -= 6;
+ dst += 6;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ default:
+ i--;
+ *dst++ = *src++;
+ break;
+ }
+ }
+
+cleanup:
+ *dst = '\0';
+ return len - i;
+}
+
+static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len,
+ NETDATA_DOUBLE value, const char *units, int precision) {
+ if(unlikely(isnan(value) || isinf(value)))
+ value = 0.0;
+
+ char *separator = "";
+ if(unlikely(isalnum(*units)))
+ separator = " ";
+
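+    // a negative precision selects automatic mode: pick the number of decimal
+    // digits from the magnitude of the value and trim trailing zeros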
+ if(precision < 0) {
+ int len, lstop = 0, trim_zeros = 1;
+
+ NETDATA_DOUBLE abs = value;
+ if(isless(value, 0)) {
+ lstop = 1;
+ abs = fabsndd(value);
+ }
+
+ if(isgreaterequal(abs, 1000)) {
+ len = snprintfz(value_string, value_string_len, "%0.0" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ trim_zeros = 0;
+ }
+ else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else len = snprintfz(value_string, value_string_len, "%0.7" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+
+ if(unlikely(trim_zeros)) {
+ int l;
+ // remove trailing zeros from the decimal part
+ for(l = len - 1; l > lstop; l--) {
+ if(likely(value_string[l] == '0')) {
+ value_string[l] = '\0';
+ len--;
+ }
+
+ else if(unlikely(value_string[l] == '.')) {
+ value_string[l] = '\0';
+ len--;
+ break;
+ }
+
+ else
+ break;
+ }
+ }
+
+ if(unlikely(len <= 0)) len = 1;
+ snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units);
+ }
+ else {
+ if(precision > 50) precision = 50;
+ snprintfz(value_string, value_string_len, "%0.*" NETDATA_DOUBLE_MODIFIER "%s%s", precision, (NETDATA_DOUBLE) value, separator, units);
+ }
+
+ return value_string;
+}
+
+typedef enum badge_units_format {
+ UNITS_FORMAT_NONE,
+ UNITS_FORMAT_SECONDS,
+ UNITS_FORMAT_SECONDS_AGO,
+ UNITS_FORMAT_MINUTES,
+ UNITS_FORMAT_MINUTES_AGO,
+ UNITS_FORMAT_HOURS,
+ UNITS_FORMAT_HOURS_AGO,
+ UNITS_FORMAT_ONOFF,
+ UNITS_FORMAT_UPDOWN,
+ UNITS_FORMAT_OKERROR,
+ UNITS_FORMAT_OKFAILED,
+ UNITS_FORMAT_EMPTY,
+ UNITS_FORMAT_PERCENT
+} UNITS_FORMAT;
+
+
+static struct units_formatter {
+ const char *units;
+ uint32_t hash;
+ UNITS_FORMAT format;
+} badge_units_formatters[] = {
+ { "seconds", 0, UNITS_FORMAT_SECONDS },
+ { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO },
+ { "minutes", 0, UNITS_FORMAT_MINUTES },
+ { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO },
+ { "hours", 0, UNITS_FORMAT_HOURS },
+ { "hours ago", 0, UNITS_FORMAT_HOURS_AGO },
+ { "on/off", 0, UNITS_FORMAT_ONOFF },
+ { "on-off", 0, UNITS_FORMAT_ONOFF },
+ { "onoff", 0, UNITS_FORMAT_ONOFF },
+ { "up/down", 0, UNITS_FORMAT_UPDOWN },
+ { "up-down", 0, UNITS_FORMAT_UPDOWN },
+ { "updown", 0, UNITS_FORMAT_UPDOWN },
+ { "ok/error", 0, UNITS_FORMAT_OKERROR },
+ { "ok-error", 0, UNITS_FORMAT_OKERROR },
+ { "okerror", 0, UNITS_FORMAT_OKERROR },
+ { "ok/failed", 0, UNITS_FORMAT_OKFAILED },
+ { "ok-failed", 0, UNITS_FORMAT_OKFAILED },
+ { "okfailed", 0, UNITS_FORMAT_OKFAILED },
+ { "empty", 0, UNITS_FORMAT_EMPTY },
+ { "null", 0, UNITS_FORMAT_EMPTY },
+ { "percentage", 0, UNITS_FORMAT_PERCENT },
+ { "percent", 0, UNITS_FORMAT_PERCENT },
+ { "pcent", 0, UNITS_FORMAT_PERCENT },
+
+ // terminator
+ { NULL, 0, UNITS_FORMAT_NONE }
+};
+
+inline char *format_value_and_unit(char *value_string, size_t value_string_len,
+ NETDATA_DOUBLE value, const char *units, int precision) {
+ static int max = -1;
+ int i;
+
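+    // on the first call, pre-compute the hash of every known units string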
+ if(unlikely(max == -1)) {
+ for(i = 0; badge_units_formatters[i].units; i++)
+ badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units);
+
+ max = i;
+ }
+
+ if(unlikely(!units)) units = "";
+ uint32_t hash_units = simple_hash(units);
+
+ UNITS_FORMAT format = UNITS_FORMAT_NONE;
+ for(i = 0; i < max; i++) {
+ struct units_formatter *ptr = &badge_units_formatters[i];
+
+ if(hash_units == ptr->hash && !strcmp(units, ptr->units)) {
+ format = ptr->format;
+ break;
+ }
+ }
+
+ if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" ago":"";
+
+ size_t s = (size_t)value;
+ size_t d = s / 86400;
+ s = s % 86400;
+
+ size_t h = s / 3600;
+ s = s % 3600;
+
+ size_t m = s / 60;
+ s = s % 60;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" ago":"";
+
+ size_t m = (size_t)value;
+ size_t d = m / (60 * 24);
+ m = m % (60 * 24);
+
+ size_t h = m / 60;
+ m = m % 60;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":"";
+
+ size_t h = (size_t)value;
+ size_t d = h / 24;
+ h = h % 24;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%zuh%s", h, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_ONOFF)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_UPDOWN)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_OKERROR)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_OKFAILED)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_EMPTY))
+ units = "";
+
+ else if(unlikely(format == UNITS_FORMAT_PERCENT))
+ units = "%";
+
+ if(unlikely(isnan(value) || isinf(value))) {
+ strcpy(value_string, "-");
+ return value_string;
+ }
+
+ return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision);
+}
+
+static struct badge_color {
+ const char *name;
+ uint32_t hash;
+ const char *color;
+} badge_colors[] = {
+
+ // colors from:
+ // https://github.com/badges/shields/blob/master/colorscheme.json
+
+ { "brightgreen", 0, "4c1" },
+ { "green", 0, "97CA00" },
+ { "yellow", 0, "dfb317" },
+ { "yellowgreen", 0, "a4a61d" },
+ { "orange", 0, "fe7d37" },
+ { "red", 0, "e05d44" },
+ { "blue", 0, "007ec6" },
+ { "grey", 0, "555" },
+ { "gray", 0, "555" },
+ { "lightgrey", 0, "9f9f9f" },
+ { "lightgray", 0, "9f9f9f" },
+
+ // terminator
+ { NULL, 0, NULL }
+};
+
+static inline const char *color_map(const char *color, const char *def) {
+ static int max = -1;
+ int i;
+
+ if(unlikely(max == -1)) {
+ for(i = 0; badge_colors[i].name ;i++)
+ badge_colors[i].hash = simple_hash(badge_colors[i].name);
+
+ max = i;
+ }
+
+ uint32_t hash = simple_hash(color);
+
+ for(i = 0; i < max; i++) {
+ struct badge_color *ptr = &badge_colors[i];
+
+ if(hash == ptr->hash && !strcmp(color, ptr->name))
+ return ptr->color;
+ }
+
+ return def;
+}
+
+typedef enum color_comparison {
+ COLOR_COMPARE_EQUAL,
+ COLOR_COMPARE_NOTEQUAL,
+ COLOR_COMPARE_LESS,
+ COLOR_COMPARE_LESSEQUAL,
+ COLOR_COMPARE_GREATER,
+ COLOR_COMPARE_GREATEREQUAL,
+} BADGE_COLOR_COMPARISON;
+
+static inline void calc_colorz(const char *color, char *final, size_t len, NETDATA_DOUBLE value) {
+ if(isnan(value) || isinf(value))
+ value = NAN;
+
+ char color_buffer[256 + 1] = "";
+ char value_buffer[256 + 1] = "";
+ BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER;
+
+ // example input:
+ // color<max|color>min|color:null...
+
+ const char *c = color;
+ while(*c) {
+ char *dc = color_buffer, *dv = NULL;
+ size_t ci = 0, vi = 0;
+
+ const char *t = c;
+
+ while(*t && *t != '|') {
+ switch(*t) {
+ case '!':
+ if(t[1] == '=') t++;
+ comparison = COLOR_COMPARE_NOTEQUAL;
+ dv = value_buffer;
+ break;
+
+ case '=':
+ case ':':
+ comparison = COLOR_COMPARE_EQUAL;
+ dv = value_buffer;
+ break;
+
+ case '}':
+ case ')':
+ case '>':
+ if(t[1] == '=') {
+ comparison = COLOR_COMPARE_GREATEREQUAL;
+ t++;
+ }
+ else
+ comparison = COLOR_COMPARE_GREATER;
+ dv = value_buffer;
+ break;
+
+ case '{':
+ case '(':
+ case '<':
+ if(t[1] == '=') {
+ comparison = COLOR_COMPARE_LESSEQUAL;
+ t++;
+ }
+ else if(t[1] == '>' || t[1] == ')' || t[1] == '}') {
+ comparison = COLOR_COMPARE_NOTEQUAL;
+ t++;
+ }
+ else
+ comparison = COLOR_COMPARE_LESS;
+ dv = value_buffer;
+ break;
+
+ default:
+ if(dv) {
+ if(vi < 256) {
+ vi++;
+ *dv++ = *t;
+ }
+ }
+ else {
+ if(ci < 256) {
+ ci++;
+ *dc++ = *t;
+ }
+ }
+ break;
+ }
+
+ t++;
+ }
+
+ // prepare for next iteration
+ if(*t == '|') t++;
+ c = t;
+
+ // do the math
+ *dc = '\0';
+ if(dv) {
+ *dv = '\0';
+ NETDATA_DOUBLE v;
+
+ if(!*value_buffer || !strcmp(value_buffer, "null")) {
+ v = NAN;
+ }
+ else {
+ v = str2l(value_buffer);
+ if(isnan(v) || isinf(v))
+ v = NAN;
+ }
+
+ if(unlikely(isnan(value) || isnan(v))) {
+ if(isnan(value) && isnan(v))
+ break;
+ }
+ else {
+ if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break;
+ }
+ }
+ else
+ break;
+ }
+
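+    // color_buffer now holds the color of the matching condition (or the last
+    // one parsed); if nothing was parsed at all, fall back to the raw argument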
+ const char *b;
+ if(color_buffer[0])
+ b = color_buffer;
+ else
+ b = color;
+
+ strncpyz(final, b, len);
+}
+
+// value + units
+#define VALUE_STRING_SIZE 100
+
+// label
+#define LABEL_STRING_SIZE 200
+
+// colors
+#define COLOR_STRING_SIZE 100
+
+static inline int allowed_hexa_char(char x) {
+ return ( (x >= '0' && x <= '9') ||
+ (x >= 'a' && x <= 'f') ||
+ (x >= 'A' && x <= 'F')
+ );
+}
+
+static int html_color_check(const char *str) {
+ int i = 0;
+ while(str[i]) {
+ if(!allowed_hexa_char(str[i]))
+ return 0;
+ if(unlikely(i >= 6))
+ return 0;
+ i++;
+ }
+ // want to allow either RGB or RRGGBB
+ return ( i == 6 || i == 3 );
+}
+
+// Parses a color argument as RRGGBB, RGB, or one of the colors from the
+// color_map table. If parsing fails, it returns the default (def) unchanged,
+// without checking it; the caller is responsible for passing a sensible,
+// safely escaped default. Whatever this function returns can be embedded
+// directly in XML, so no further escaping is necessary (except for the
+// default, which remains the caller's responsibility).
+#define BADGE_SVG_COLOR_ARG_MAXLEN 20
+
+static const char *parse_color_argument(const char *arg, const char *def)
+{
+ if( !arg )
+ return def;
+ size_t len = strnlen(arg, BADGE_SVG_COLOR_ARG_MAXLEN);
+ if( len < 2 || len >= BADGE_SVG_COLOR_ARG_MAXLEN )
+ return def;
+ if( html_color_check(arg) )
+ return arg;
+ return color_map(arg, def);
+}
+
+void buffer_svg(BUFFER *wb, const char *label,
+ NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) {
+ char value_color_buffer[COLOR_STRING_SIZE + 1]
+ , value_string[VALUE_STRING_SIZE + 1]
+ , label_escaped[LABEL_STRING_SIZE + 1]
+ , value_escaped[VALUE_STRING_SIZE + 1];
+
+ const char *label_color_parsed;
+ const char *value_color_parsed;
+
+ double label_width = (double)fixed_width_lbl, value_width = (double)fixed_width_val, total_width;
+ double height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0;
+
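+    // badges cannot be scaled below the default size of 100%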
+ if(scale < 100) scale = 100;
+
+ if(unlikely(!value_color || !*value_color))
+ value_color = (isnan(value) || isinf(value))?"999":"4c1";
+
+ calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value);
+ format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)? fabsndd(value):value, units, precision);
+
+ if(fixed_width_lbl <= 0 || fixed_width_val <= 0) {
+ label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
+ value_width = verdana11_width(value_string, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
+ }
+ total_width = label_width + value_width;
+
+ escape_xmlz(label_escaped, label, LABEL_STRING_SIZE);
+ escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE);
+
+ label_color_parsed = parse_color_argument(label_color, "555");
+ value_color_parsed = parse_color_argument(value_color_buffer, "555");
+
+ wb->contenttype = CT_IMAGE_SVG_XML;
+
+ total_width = total_width * scale / 100.0;
+ height = height * scale / 100.0;
+ font_size = font_size * scale / 100.0;
+ text_offset = text_offset * scale / 100.0;
+ label_width = label_width * scale / 100.0;
+ value_width = value_width * scale / 100.0;
+ round_corner = round_corner * scale / 100.0;
+
+ // svg template from:
+ // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg
+ buffer_sprintf(wb,
+ "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%0.2f\" height=\"%0.2f\">"
+ "<linearGradient id=\"smooth\" x2=\"0\" y2=\"100%%\">"
+ "<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>"
+ "<stop offset=\"1\" stop-opacity=\".1\"/>"
+ "</linearGradient>"
+ "<mask id=\"round\">"
+ "<rect class=\"bdge-ttl-width\" width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>"
+ "</mask>"
+ "<g mask=\"url(#round)\">"
+ "<rect class=\"bdge-rect-lbl\" width=\"%0.2f\" height=\"%0.2f\" fill=\"#%s\"/>",
+ total_width, height,
+ total_width, height, round_corner,
+ label_width, height, label_color_parsed); //<rect class="bdge-rect-lbl"
+
+ if(fixed_width_lbl > 0 && fixed_width_val > 0) {
+ buffer_sprintf(wb,
+ "<clipPath id=\"lbl-rect\">"
+ "<rect class=\"bdge-rect-lbl\" width=\"%0.2f\" height=\"%0.2f\"/>"
+ "</clipPath>",
+ label_width, height); //<clipPath id="lbl-rect"> <rect class="bdge-rect-lbl"
+ }
+
+ buffer_sprintf(wb,
+ "<rect class=\"bdge-rect-val\" x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"#%s\"/>",
+ label_width, value_width, height, value_color_parsed);
+
+ if(fixed_width_lbl > 0 && fixed_width_val > 0) {
+ buffer_sprintf(wb,
+ "<clipPath id=\"val-rect\">"
+ "<rect class=\"bdge-rect-val\" x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\"/>"
+ "</clipPath>",
+ label_width, value_width, height);
+ }
+
+ buffer_sprintf(wb,
+ "<rect class=\"bdge-ttl-width\" width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>"
+ "</g>"
+ "<g text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">"
+ "<text class=\"bdge-lbl-lbl\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\" clip-path=\"url(#lbl-rect)\">%s</text>"
+ "<text class=\"bdge-lbl-lbl\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#%s\" clip-path=\"url(#lbl-rect)\">%s</text>"
+ "<text class=\"bdge-lbl-val\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\" clip-path=\"url(#val-rect)\">%s</text>"
+ "<text class=\"bdge-lbl-val\" x=\"%0.2f\" y=\"%0.0f\" fill=\"#%s\" clip-path=\"url(#val-rect)\">%s</text>"
+ "</g>",
+ total_width, height,
+ font_size,
+ label_width / 2, ceil(height - text_offset), label_escaped,
+ label_width / 2, ceil(height - text_offset - 1.0), parse_color_argument(text_color_lbl, "fff"), label_escaped,
+ label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped,
+ label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), parse_color_argument(text_color_val, "fff"), value_escaped);
+
+ if(fixed_width_lbl <= 0 || fixed_width_val <= 0){
+ buffer_sprintf(wb,
+ "<script type=\"text/javascript\">"
+ "var bdg_horiz_padding = %d;"
+ "function netdata_bdge_each(list, attr, value){"
+ "Array.prototype.forEach.call(list, function(el){"
+ "el.setAttribute(attr, value);"
+ "});"
+ "};"
+ "var this_svg = document.currentScript.closest(\"svg\");"
+ "var elem_lbl = this_svg.getElementsByClassName(\"bdge-lbl-lbl\");"
+ "var elem_val = this_svg.getElementsByClassName(\"bdge-lbl-val\");"
+ "var lbl_size = elem_lbl[0].getBBox();"
+ "var val_size = elem_val[0].getBBox();"
+ "var width_total = lbl_size.width + bdg_horiz_padding*2;"
+ "this_svg.getElementsByClassName(\"bdge-rect-lbl\")[0].setAttribute(\"width\", width_total);"
+ "netdata_bdge_each(elem_lbl, \"x\", (lbl_size.width / 2) + bdg_horiz_padding);"
+ "netdata_bdge_each(elem_val, \"x\", width_total + (val_size.width / 2) + bdg_horiz_padding);"
+ "var val_rect = this_svg.getElementsByClassName(\"bdge-rect-val\")[0];"
+ "val_rect.setAttribute(\"width\", val_size.width + bdg_horiz_padding*2);"
+ "val_rect.setAttribute(\"x\", width_total);"
+ "width_total += val_size.width + bdg_horiz_padding*2;"
+ "var width_update_elems = this_svg.getElementsByClassName(\"bdge-ttl-width\");"
+ "netdata_bdge_each(width_update_elems, \"width\", width_total);"
+ "this_svg.setAttribute(\"width\", width_total);"
+ "</script>",
+ BADGE_HORIZONTAL_PADDING);
+ }
+ buffer_sprintf(wb, "</svg>");
+}
+
+#define BADGE_URL_ARG_LBL_COLOR "text_color_lbl"
+#define BADGE_URL_ARG_VAL_COLOR "text_color_val"
+
+int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) {
+ int ret = HTTP_RESP_BAD_REQUEST;
+ buffer_flush(w->response.data);
+
+ BUFFER *dimensions = NULL;
+
+ const char *chart = NULL
+ , *before_str = NULL
+ , *after_str = NULL
+ , *points_str = NULL
+ , *multiply_str = NULL
+ , *divide_str = NULL
+ , *label = NULL
+ , *units = NULL
+ , *label_color = NULL
+ , *value_color = NULL
+ , *refresh_str = NULL
+ , *precision_str = NULL
+ , *scale_str = NULL
+ , *alarm = NULL
+ , *fixed_width_lbl_str = NULL
+ , *fixed_width_val_str = NULL
+ , *text_color_lbl_str = NULL
+ , *text_color_val_str = NULL
+ , *group_options = NULL;
+
+ int group = RRDR_GROUPING_AVERAGE;
+ uint32_t options = 0x00000000;
+
+ const RRDCALC_ACQUIRED *rca = NULL;
+ RRDCALC *rc = NULL;
+ RRDSET *st = NULL;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value);
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "chart")) chart = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions)
+ dimensions = buffer_create(100);
+
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ else if(!strcmp(name, "after")) after_str = value;
+ else if(!strcmp(name, "before")) before_str = value;
+ else if(!strcmp(name, "points")) points_str = value;
+ else if(!strcmp(name, "group_options")) group_options = value;
+ else if(!strcmp(name, "group")) {
+ group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+ }
+ else if(!strcmp(name, "options")) {
+ options |= web_client_api_request_v1_data_options(value);
+ }
+ else if(!strcmp(name, "label")) label = value;
+ else if(!strcmp(name, "units")) units = value;
+ else if(!strcmp(name, "label_color")) label_color = value;
+ else if(!strcmp(name, "value_color")) value_color = value;
+ else if(!strcmp(name, "multiply")) multiply_str = value;
+ else if(!strcmp(name, "divide")) divide_str = value;
+ else if(!strcmp(name, "refresh")) refresh_str = value;
+ else if(!strcmp(name, "precision")) precision_str = value;
+ else if(!strcmp(name, "scale")) scale_str = value;
+ else if(!strcmp(name, "fixed_width_lbl")) fixed_width_lbl_str = value;
+ else if(!strcmp(name, "fixed_width_val")) fixed_width_val_str = value;
+ else if(!strcmp(name, "alarm")) alarm = value;
+ else if(!strcmp(name, BADGE_URL_ARG_LBL_COLOR)) text_color_lbl_str = value;
+ else if(!strcmp(name, BADGE_URL_ARG_VAL_COLOR)) text_color_val_str = value;
+ }
+
+ int fixed_width_lbl = -1;
+ int fixed_width_val = -1;
+
+ if(fixed_width_lbl_str && *fixed_width_lbl_str
+ && fixed_width_val_str && *fixed_width_val_str) {
+ fixed_width_lbl = str2i(fixed_width_lbl_str);
+ fixed_width_val = str2i(fixed_width_val_str);
+ }
+
+ if(!chart || !*chart) {
+ buffer_no_cacheable(w->response.data);
+ buffer_sprintf(w->response.data, "No chart id is given at the request.");
+ goto cleanup;
+ }
+
+ int scale = (scale_str && *scale_str)?str2i(scale_str):100;
+
+ st = rrdset_find(host, chart);
+ if(!st) st = rrdset_find_byname(host, chart);
+ if(!st) {
+ buffer_no_cacheable(w->response.data);
+ buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL);
+ ret = HTTP_RESP_OK;
+ goto cleanup;
+ }
+ st->last_accessed_time = now_realtime_sec();
+
+ if(alarm) {
+ rca = rrdcalc_from_rrdset_get(st, alarm);
+ rc = rrdcalc_acquired_to_rrdcalc(rca);
+
+ if (!rc) {
+ buffer_no_cacheable(w->response.data);
+ buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL);
+ ret = HTTP_RESP_OK;
+ goto cleanup;
+ }
+ }
+
+ long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1;
+ long long divide = (divide_str && *divide_str )?str2l(divide_str):1;
+ long long before = (before_str && *before_str )?str2l(before_str):0;
+ long long after = (after_str && *after_str )?str2l(after_str):-st->update_every;
+ int points = (points_str && *points_str )?str2i(points_str):1;
+ int precision = (precision_str && *precision_str)?str2i(precision_str):-1;
+
+ if(!multiply) multiply = 1;
+ if(!divide) divide = 1;
+
+ int refresh = 0;
+ if(refresh_str && *refresh_str) {
+ if(!strcmp(refresh_str, "auto")) {
+ if(rc) refresh = rc->update_every;
+ else if(options & RRDR_OPTION_NOT_ALIGNED)
+ refresh = st->update_every;
+ else {
+ refresh = (int)(before - after);
+ if(refresh < 0) refresh = -refresh;
+ }
+ }
+ else {
+ refresh = str2i(refresh_str);
+ if(refresh < 0) refresh = -refresh;
+ }
+ }
+
+ if(!label) {
+ if(alarm) {
+ char *s = (char *)alarm;
+ while(*s) {
+ if(*s == '_') *s = ' ';
+ s++;
+ }
+ label = alarm;
+ }
+ else if(dimensions) {
+ const char *dim = buffer_tostring(dimensions);
+ if(*dim == '|') dim++;
+ label = dim;
+ }
+ else
+ label = rrdset_name(st);
+ }
+ if(!units) {
+ if(alarm) {
+ if(rc->units)
+ units = rrdcalc_units(rc);
+ else
+ units = "";
+ }
+ else if(options & RRDR_OPTION_PERCENTAGE)
+ units = "%";
+ else
+ units = rrdset_units(st);
+ }
+
+ debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'"
+ , w->id
+ , chart
+ , alarm?alarm:""
+ , (dimensions)?buffer_tostring(dimensions):""
+ , after
+ , before
+ , points
+ , group
+ , options
+ );
+
+ if(rc) {
+ if (refresh > 0) {
+ buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
+ w->response.data->expires = now_realtime_sec() + refresh;
+ }
+ else buffer_no_cacheable(w->response.data);
+
+ if(!value_color) {
+ switch(rc->status) {
+ case RRDCALC_STATUS_CRITICAL:
+ value_color = "red";
+ break;
+
+ case RRDCALC_STATUS_WARNING:
+ value_color = "orange";
+ break;
+
+ case RRDCALC_STATUS_CLEAR:
+ value_color = "brightgreen";
+ break;
+
+ case RRDCALC_STATUS_UNDEFINED:
+ value_color = "lightgrey";
+ break;
+
+ case RRDCALC_STATUS_UNINITIALIZED:
+ value_color = "#000";
+ break;
+
+ default:
+ value_color = "grey";
+ break;
+ }
+ }
+
+ buffer_svg(w->response.data,
+ label,
+ (isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide,
+ units,
+ label_color,
+ value_color,
+ precision,
+ scale,
+ options,
+ fixed_width_lbl,
+ fixed_width_val,
+ text_color_lbl_str,
+ text_color_val_str
+ );
+ ret = HTTP_RESP_OK;
+ }
+ else {
+ time_t latest_timestamp = 0;
+ int value_is_null = 1;
+ NETDATA_DOUBLE n = NAN;
+ ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
+
+ // if the collected value is too old, don't calculate its value
+ if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above)))
+ ret = rrdset2value_api_v1(st, w->response.data, &n,
+ (dimensions) ? buffer_tostring(dimensions) : NULL,
+ points, after, before, group, group_options, 0, options,
+ NULL, &latest_timestamp,
+ NULL, NULL, NULL,
+ &value_is_null, NULL, 0, 0,
+ QUERY_SOURCE_API_BADGE);
+
+ // if the value cannot be calculated, show empty badge
+ if (ret != HTTP_RESP_OK) {
+ buffer_no_cacheable(w->response.data);
+ value_is_null = 1;
+ n = 0;
+ ret = HTTP_RESP_OK;
+ }
+ else if (refresh > 0) {
+ buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
+ w->response.data->expires = now_realtime_sec() + refresh;
+ }
+ else buffer_no_cacheable(w->response.data);
+
+ // render the badge
+ buffer_svg(w->response.data,
+ label,
+ (value_is_null)?NAN:(n * multiply / divide),
+ units,
+ label_color,
+ value_color,
+ precision,
+ scale,
+ options,
+ fixed_width_lbl,
+ fixed_width_val,
+ text_color_lbl_str,
+ text_color_val_str
+ );
+ }
+
+cleanup:
+ rrdcalc_from_rrdset_release(st, rca);
+ buffer_free(dimensions);
+ return ret;
+}
diff --git a/web/api/badges/web_buffer_svg.h b/web/api/badges/web_buffer_svg.h
new file mode 100644
index 0000000..7185781
--- /dev/null
+++ b/web/api/badges/web_buffer_svg.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_BUFFER_SVG_H
+#define NETDATA_WEB_BUFFER_SVG_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web/server/web_client.h"
+
+void buffer_svg(BUFFER *wb, const char *label,
+ NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val);
+char *format_value_and_unit(char *value_string, size_t value_string_len,
+ NETDATA_DOUBLE value, const char *units, int precision);
+
+int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url);
+
+#include "web/api/web_api_v1.h"
+
+#endif /* NETDATA_WEB_BUFFER_SVG_H */
diff --git a/web/api/exporters/Makefile.am b/web/api/exporters/Makefile.am
new file mode 100644
index 0000000..06fda51
--- /dev/null
+++ b/web/api/exporters/Makefile.am
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ shell \
+ prometheus \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md
new file mode 100644
index 0000000..1d517a9
--- /dev/null
+++ b/web/api/exporters/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "Exporters"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/README.md
+-->
+
+# Exporters
+
+TBD
+
+
diff --git a/web/api/exporters/allmetrics.c b/web/api/exporters/allmetrics.c
new file mode 100644
index 0000000..8806540
--- /dev/null
+++ b/web/api/exporters/allmetrics.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "allmetrics.h"
+
+struct prometheus_output_options {
+ char *name;
+ PROMETHEUS_OUTPUT_OPTIONS flag;
+} prometheus_output_flags_root[] = {
+ { "help", PROMETHEUS_OUTPUT_HELP },
+ { "types", PROMETHEUS_OUTPUT_TYPES },
+ { "names", PROMETHEUS_OUTPUT_NAMES },
+ { "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS },
+ { "variables", PROMETHEUS_OUTPUT_VARIABLES },
+ { "oldunits", PROMETHEUS_OUTPUT_OLDUNITS },
+ { "hideunits", PROMETHEUS_OUTPUT_HIDEUNITS },
+ // terminator
+ { NULL, PROMETHEUS_OUTPUT_NONE },
+};
+
+inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) {
+ int format = ALLMETRICS_SHELL;
+ const char *filter = NULL;
+ const char *prometheus_server = w->client_ip;
+
+ uint32_t prometheus_exporting_options;
+ if (prometheus_exporter_instance)
+ prometheus_exporting_options = prometheus_exporter_instance->config.options;
+ else
+ prometheus_exporting_options = global_exporting_options;
+
+ PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options =
+ PROMETHEUS_OUTPUT_TIMESTAMPS |
+ ((prometheus_exporting_options & EXPORTING_OPTION_SEND_NAMES) ? PROMETHEUS_OUTPUT_NAMES : 0);
+
+ const char *prometheus_prefix;
+ if (prometheus_exporter_instance)
+ prometheus_prefix = prometheus_exporter_instance->config.prefix;
+ else
+ prometheus_prefix = global_exporting_prefix;
+
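+    // parse the URL query string: name=value pairs separated by '&'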
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ if(!strcmp(name, "format")) {
+ if(!strcmp(value, ALLMETRICS_FORMAT_SHELL))
+ format = ALLMETRICS_SHELL;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS))
+ format = ALLMETRICS_PROMETHEUS;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS))
+ format = ALLMETRICS_PROMETHEUS_ALL_HOSTS;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_JSON))
+ format = ALLMETRICS_JSON;
+ else
+ format = 0;
+ }
+ else if(!strcmp(name, "filter")) {
+ filter = value;
+ }
+ else if(!strcmp(name, "server")) {
+ prometheus_server = value;
+ }
+ else if(!strcmp(name, "prefix")) {
+ prometheus_prefix = value;
+ }
+ else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) {
+ prometheus_exporting_options = exporting_parse_data_source(value, prometheus_exporting_options);
+ }
+ else {
+ int i;
+ for(i = 0; prometheus_output_flags_root[i].name ; i++) {
+ if(!strcmp(name, prometheus_output_flags_root[i].name)) {
+ if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true"))
+ prometheus_output_options |= prometheus_output_flags_root[i].flag;
+ else
+ prometheus_output_options &= ~prometheus_output_flags_root[i].flag;
+
+ break;
+ }
+ }
+ }
+ }
+
+ buffer_flush(w->response.data);
+ buffer_no_cacheable(w->response.data);
+
+ switch(format) {
+ case ALLMETRICS_JSON:
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ rrd_stats_api_v1_charts_allmetrics_json(host, filter, w->response.data);
+ return HTTP_RESP_OK;
+
+ case ALLMETRICS_SHELL:
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ rrd_stats_api_v1_charts_allmetrics_shell(host, filter, w->response.data);
+ return HTTP_RESP_OK;
+
+ case ALLMETRICS_PROMETHEUS:
+ w->response.data->contenttype = CT_PROMETHEUS;
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ host
+ , filter
+ , w->response.data
+ , prometheus_server
+ , prometheus_prefix
+ , prometheus_exporting_options
+ , prometheus_output_options
+ );
+ return HTTP_RESP_OK;
+
+ case ALLMETRICS_PROMETHEUS_ALL_HOSTS:
+ w->response.data->contenttype = CT_PROMETHEUS;
+ rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ host
+ , filter
+ , w->response.data
+ , prometheus_server
+ , prometheus_prefix
+ , prometheus_exporting_options
+ , prometheus_output_options
+ );
+ return HTTP_RESP_OK;
+
+ default:
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
diff --git a/web/api/exporters/allmetrics.h b/web/api/exporters/allmetrics.h
new file mode 100644
index 0000000..3afc42e
--- /dev/null
+++ b/web/api/exporters/allmetrics.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_ALLMETRICS_H
+#define NETDATA_API_ALLMETRICS_H
+
+#include "web/api/formatters/rrd2json.h"
+#include "shell/allmetrics_shell.h"
+#include "web/server/web_client.h"
+
+int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
+
+#endif //NETDATA_API_ALLMETRICS_H
diff --git a/web/api/exporters/prometheus/Makefile.am b/web/api/exporters/prometheus/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/exporters/prometheus/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md
new file mode 100644
index 0000000..cf7e2ca
--- /dev/null
+++ b/web/api/exporters/prometheus/README.md
@@ -0,0 +1,10 @@
+<!--
+title: "Prometheus exporter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/prometheus/README.md
+-->
+
+# Prometheus exporter
+
+Read the Prometheus exporter documentation: [Using Netdata with Prometheus](/exporting/prometheus/README.md).
+
+
diff --git a/web/api/exporters/shell/Makefile.am b/web/api/exporters/shell/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/exporters/shell/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md
new file mode 100644
index 0000000..a41326c
--- /dev/null
+++ b/web/api/exporters/shell/README.md
@@ -0,0 +1,69 @@
+<!--
+title: "Shell exporter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/shell/README.md
+-->
+
+# Shell exporter
+
+Shell scripts can query Netdata directly:
+
+```sh
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+```
+
+After running this command, all Netdata metrics are exposed as shell variables. For example:
+
+```sh
+# source the metrics
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+
+# let's see if there are variables exposed by Netdata for system.cpu
+set | grep "^NETDATA_SYSTEM_CPU"
+
+NETDATA_SYSTEM_CPU_GUEST=0
+NETDATA_SYSTEM_CPU_GUEST_NICE=0
+NETDATA_SYSTEM_CPU_IDLE=95
+NETDATA_SYSTEM_CPU_IOWAIT=0
+NETDATA_SYSTEM_CPU_IRQ=0
+NETDATA_SYSTEM_CPU_NICE=0
+NETDATA_SYSTEM_CPU_SOFTIRQ=0
+NETDATA_SYSTEM_CPU_STEAL=0
+NETDATA_SYSTEM_CPU_SYSTEM=1
+NETDATA_SYSTEM_CPU_USER=4
+NETDATA_SYSTEM_CPU_VISIBLETOTAL=5
+
+# let's see the total cpu utilization of the system
+echo ${NETDATA_SYSTEM_CPU_VISIBLETOTAL}
+5
+
+# what about alarms?
+set | grep "^NETDATA_ALARM_SYSTEM_SWAP_"
+NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS=CLEAR
+NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_VALUE=51
+
+# let's get the current status of the alarm 'used swap'
+echo ${NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS}
+CLEAR
+
+# is it fast?
+time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null
+
+real 0m0,070s
+user 0m0,000s
+sys 0m0,007s
+
+# it is...
+# 0.07 seconds for curl to be loaded, connect to Netdata and fetch the response back...
+```
+
+The `_VISIBLETOTAL` variable sums all the visible (non-hidden) dimensions of each chart.
+
+The format of the variables is:
+
+```sh
+NETDATA_${chart_id^^}_${dimension_id^^}="${value}"
+```
+
+The value is rounded to the closest integer, since shell scripts cannot process decimal numbers.
+
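+Below is a minimal sketch of a script built on these variables (the 90% threshold is illustrative;
+`NETDATA_SYSTEM_CPU_VISIBLETOTAL` is the variable shown above):
+
+```sh
+#!/bin/sh
+# source all Netdata metrics as shell variables
+eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
+
+# values are rounded to integers, so plain shell arithmetic works
+if [ "${NETDATA_SYSTEM_CPU_VISIBLETOTAL:-0}" -gt 90 ]; then
+    echo "CPU utilization is above 90%"
+fi
+```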
+
diff --git a/web/api/exporters/shell/allmetrics_shell.c b/web/api/exporters/shell/allmetrics_shell.c
new file mode 100644
index 0000000..0ffbac6
--- /dev/null
+++ b/web/api/exporters/shell/allmetrics_shell.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "allmetrics_shell.h"
+
+// ----------------------------------------------------------------------------
+// BASH
+// /api/v1/allmetrics?format=shell
+
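+// copy a name into a safe shell variable name:
+// alphanumeric characters are uppercased, everything else becomes '_'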
+static inline size_t shell_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(unlikely(!isalnum(c))) *d = '_';
+ else *d = (char)toupper(c);
+ }
+ *d = '\0';
+
+ return n;
+}
+
+#define SHELL_ELEMENT_MAX 100
+
+void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb) {
+ analytics_log_shell();
+ SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT);
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ if (filter && !simple_pattern_matches(filter, rrdset_name(st)))
+ continue;
+
+ NETDATA_DOUBLE total = 0.0;
+ char chart[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(chart, st->name?rrdset_name(st):rrdset_id(st), SHELL_ELEMENT_MAX);
+
+ buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", rrdset_id(st), rrdset_name(st));
+ if(rrdset_is_available_for_viewers(st)) {
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char dimension[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(dimension, rd->name?rrddim_name(rd):rrddim_id(rd), SHELL_ELEMENT_MAX);
+
+ NETDATA_DOUBLE n = rd->last_stored_value;
+
+ if(isnan(n) || isinf(n))
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, rrdset_units(st));
+ else {
+ if(rd->multiplier < 0 || rd->divisor < 0) n = -n;
+ n = roundndd(n);
+ if(!rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN)) total += n;
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, dimension, n, rrdset_units(st));
+ }
+ }
+ }
+ rrddim_foreach_done(rd);
+
+ total = roundndd(total);
+ buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, total, rrdset_units(st));
+ }
+ }
+ rrdset_foreach_done(st);
+
+ buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n");
+
+ RRDCALC *rc;
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
+ if(!rc->rrdset) continue;
+
+ char chart[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(chart, rc->rrdset->name?rrdset_name(rc->rrdset):rrdset_id(rc->rrdset), SHELL_ELEMENT_MAX);
+
+ char alarm[SHELL_ELEMENT_MAX + 1];
+ shell_name_copy(alarm, rrdcalc_name(rc), SHELL_ELEMENT_MAX);
+
+ NETDATA_DOUBLE n = rc->value;
+
+ if(isnan(n) || isinf(n))
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rrdcalc_units(rc));
+ else {
+ n = roundndd(n);
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rrdcalc_units(rc));
+ }
+
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status));
+ }
+ foreach_rrdcalc_in_rrdhost_done(rc);
+
+ simple_pattern_free(filter);
+}
+
+// ----------------------------------------------------------------------------
+
+void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb) {
+ analytics_log_json();
+ SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT);
+
+ buffer_strcat(wb, "{");
+
+ size_t chart_counter = 0;
+ size_t dimension_counter = 0;
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ if (filter && !(simple_pattern_matches(filter, rrdset_id(st)) || simple_pattern_matches(filter, rrdset_name(st))))
+ continue;
+
+ if(rrdset_is_available_for_viewers(st)) {
+ buffer_sprintf(
+ wb,
+ "%s\n"
+ "\t\"%s\": {\n"
+ "\t\t\"name\":\"%s\",\n"
+ "\t\t\"family\":\"%s\",\n"
+ "\t\t\"context\":\"%s\",\n"
+ "\t\t\"units\":\"%s\",\n"
+ "\t\t\"last_updated\": %"PRId64",\n"
+ "\t\t\"dimensions\": {",
+ chart_counter ? "," : "",
+ rrdset_id(st),
+ rrdset_name(st),
+ rrdset_family(st),
+ rrdset_context(st),
+ rrdset_units(st),
+ (int64_t)rrdset_last_entry_t(st));
+
+ chart_counter++;
+ dimension_counter = 0;
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ buffer_sprintf(
+ wb,
+ "%s\n"
+ "\t\t\t\"%s\": {\n"
+ "\t\t\t\t\"name\": \"%s\",\n"
+ "\t\t\t\t\"value\": ",
+ dimension_counter ? "," : "",
+ rrddim_id(rd),
+ rrddim_name(rd));
+
+ if(isnan(rd->last_stored_value))
+ buffer_strcat(wb, "null");
+ else
+ buffer_sprintf(wb, NETDATA_DOUBLE_FORMAT, rd->last_stored_value);
+
+ buffer_strcat(wb, "\n\t\t\t}");
+
+ dimension_counter++;
+ }
+ }
+ rrddim_foreach_done(rd);
+
+ buffer_strcat(wb, "\n\t\t}\n\t}");
+ }
+ }
+ rrdset_foreach_done(st);
+
+ buffer_strcat(wb, "\n}");
+ simple_pattern_free(filter);
+}
+
diff --git a/web/api/exporters/shell/allmetrics_shell.h b/web/api/exporters/shell/allmetrics_shell.h
new file mode 100644
index 0000000..d6598e0
--- /dev/null
+++ b/web/api/exporters/shell/allmetrics_shell.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_ALLMETRICS_SHELL_H
+#define NETDATA_API_ALLMETRICS_SHELL_H
+
+#include "../allmetrics.h"
+
+#define ALLMETRICS_FORMAT_SHELL "shell"
+#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus"
+#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts"
+#define ALLMETRICS_FORMAT_JSON "json"
+
+#define ALLMETRICS_SHELL 1
+#define ALLMETRICS_PROMETHEUS 2
+#define ALLMETRICS_JSON 3
+#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4
+
+void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb);
+void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb);
+
+#endif //NETDATA_API_ALLMETRICS_SHELL_H
diff --git a/web/api/formatters/Makefile.am b/web/api/formatters/Makefile.am
new file mode 100644
index 0000000..11f239c
--- /dev/null
+++ b/web/api/formatters/Makefile.am
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ csv \
+ json \
+ ssv \
+ value \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md
new file mode 100644
index 0000000..3e67ac6
--- /dev/null
+++ b/web/api/formatters/README.md
@@ -0,0 +1,78 @@
+<!--
+title: "Query formatting"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/README.md
+-->
+
+# Query formatting
+
+API data queries need to be formatted before being returned to the caller.
+Using API parameters, the caller can define the format they wish to get back.
+
+The following formats are supported:
+
+| format|module|content type|description|
+|:----:|:----:|:----------:|:----------|
+| `array`|[ssv](/web/api/formatters/ssv/README.md)|application/json|a JSON array|
+| `csv`|[csv](/web/api/formatters/csv/README.md)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
+| `csvjsonarray`|[csv](/web/api/formatters/csv/README.md)|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
+| `datasource`|[json](/web/api/formatters/json/README.md)|application/json|a Google Visualization Provider `datasource` javascript callback|
+| `datatable`|[json](/web/api/formatters/json/README.md)|application/json|a Google `datatable`|
+| `html`|[csv](/web/api/formatters/csv/README.md)|text/html|an html table|
+| `json`|[json](/web/api/formatters/json/README.md)|application/json|a JSON object|
+| `jsonp`|[json](/web/api/formatters/json/README.md)|application/json|a JSONP javascript callback|
+| `markdown`|[csv](/web/api/formatters/csv/README.md)|text/plain|a markdown table|
+| `ssv`|[ssv](/web/api/formatters/ssv/README.md)|text/plain|a space separated list of values|
+| `ssvcomma`|[ssv](/web/api/formatters/ssv/README.md)|text/plain|a comma separated list of values|
+| `tsv`|[csv](/web/api/formatters/csv/README.md)|text/plain|a TAB delimited `csv` (MS Excel flavor)|
+
+For examples of each format, check the respective module documentation.
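+
+For instance, the same query can be rendered in any of these formats just by changing `format=`
+(a sketch; the chart and time window are illustrative):
+
+```sh
+BASE='http://localhost:19999/api/v1/data?chart=system.cpu&after=-60&points=4'
+
+curl -Ss "${BASE}&format=json"     # a JSON object
+curl -Ss "${BASE}&format=csv"      # a comma separated table
+curl -Ss "${BASE}&format=markdown" # a markdown table
+```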
+
+## Metadata with the `jsonwrap` option
+
+All data queries can be encapsulated in a JSON object that carries metadata about the query and its results.
+
+This is done by adding `options=jsonwrap` to the API URL (if other `options` are already given, append
+`,jsonwrap` to them).
+
+Here is an example of such an object:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero,jsonwrap'
+{
+ "api": 1,
+ "id": "system.cpu",
+ "name": "system.cpu",
+ "view_update_every": 600,
+ "update_every": 1,
+ "first_entry": 1540387074,
+ "last_entry": 1540647070,
+ "before": 1540647000,
+ "after": 1540644000,
+ "dimension_names": ["steal", "softirq", "user", "system", "iowait"],
+ "dimension_ids": ["steal", "softirq", "user", "system", "iowait"],
+ "latest_values": [0, 0.2493766, 1.745636, 0.4987531, 0],
+ "view_latest_values": [0.0158314, 0.0516506, 0.866549, 0.7196127, 0.0050002],
+ "dimensions": 5,
+ "points": 6,
+ "format": "csv",
+ "result": "time,steal,softirq,user,system,iowait\n2018-10-27 13:30:00,0.0158314,0.0516506,0.866549,0.7196127,0.0050002\n2018-10-27 13:20:00,0.0149856,0.0529183,0.8673155,0.7121144,0.0049979\n2018-10-27 13:10:00,0.0137501,0.053315,0.8578097,0.7197613,0.0054209\n2018-10-27 13:00:00,0.0154252,0.0554688,0.899432,0.7200638,0.0067252\n2018-10-27 12:50:00,0.0145866,0.0495922,0.8404341,0.7011141,0.0041688\n2018-10-27 12:40:00,0.0162366,0.0595954,0.8827475,0.7020573,0.0041636\n",
+ "min": 0,
+ "max": 0
+}
+```
+
+## Downloading data query result files
+
+Following the [Google Visualization Provider guidelines](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source),
+Netdata supports parsing `tqx` options.
+
+Using these options, any Netdata data query can instruct the web browser to download
+the result and save it under a given filename.
+
+For example, to download a CSV file with CPU utilization of the last hour,
+[click here](https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:system+cpu+utilization+of+the+last_hour.csv).
+
+This is done by appending `&tqx=outFileName:FILENAME` to any data query.
+The output will be in the format given with `&format=`.
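+
+With `curl`, the effect can be inspected by printing the response headers (a sketch; it assumes the
+agent announces the filename with a `Content-Disposition` header, which is what instructs browsers
+to save the file):
+
+```sh
+# show only the response headers; discard the body
+curl -sD - -o /dev/null \
+  'http://localhost:19999/api/v1/data?chart=system.cpu&after=-3600&format=csv&tqx=outFileName:cpu.csv'
+```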
+
+
diff --git a/web/api/formatters/charts2json.c b/web/api/formatters/charts2json.c
new file mode 100644
index 0000000..1fc20b4
--- /dev/null
+++ b/web/api/formatters/charts2json.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "charts2json.h"
+
+// generate JSON for the /api/v1/charts API call
+
+const char* get_release_channel() {
+ static int use_stable = -1;
+
+ if (use_stable == -1) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/.environment", netdata_configured_user_config_dir);
+ procfile *ff = procfile_open(filename, "=", PROCFILE_FLAG_DEFAULT);
+ if (ff) {
+ procfile_set_quotes(ff, "'\"");
+ ff = procfile_readall(ff);
+ if (ff) {
+ unsigned int i;
+ for (i = 0; i < procfile_lines(ff); i++) {
+ if (!procfile_linewords(ff, i))
+ continue;
+ if (!strcmp(procfile_lineword(ff, i, 0), "RELEASE_CHANNEL")) {
+ if (!strcmp(procfile_lineword(ff, i, 1), "stable"))
+ use_stable = 1;
+ else if (!strcmp(procfile_lineword(ff, i, 1), "nightly"))
+ use_stable = 0;
+ break;
+ }
+ }
+ procfile_close(ff);
+ }
+ }
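+        // no hint found in .environment: nightly builds carry a '-' in their version string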
+ if (use_stable == -1)
+ use_stable = strchr(program_version, '-') ? 0 : 1;
+ }
+ return (use_stable)?"stable":"nightly";
+}
+
+void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived) {
+ static char *custom_dashboard_info_js_filename = NULL;
+ size_t c, dimensions = 0, memory = 0, alarms = 0;
+ RRDSET *st;
+
+ time_t now = now_realtime_sec();
+
+ if(unlikely(!custom_dashboard_info_js_filename))
+ custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", "");
+
+ buffer_sprintf(wb, "{\n"
+ "\t\"hostname\": \"%s\""
+ ",\n\t\"version\": \"%s\""
+ ",\n\t\"release_channel\": \"%s\""
+ ",\n\t\"os\": \"%s\""
+ ",\n\t\"timezone\": \"%s\""
+ ",\n\t\"update_every\": %d"
+ ",\n\t\"history\": %ld"
+ ",\n\t\"memory_mode\": \"%s\""
+ ",\n\t\"custom_info\": \"%s\""
+ ",\n\t\"charts\": {"
+ , rrdhost_hostname(host)
+ , rrdhost_program_version(host)
+ , get_release_channel()
+ , rrdhost_os(host)
+ , rrdhost_timezone(host)
+ , host->rrd_update_every
+ , host->rrd_history_entries
+ , rrd_memory_mode_name(host->rrd_memory_mode)
+ , custom_dashboard_info_js_filename
+ );
+
+ c = 0;
+ rrdset_foreach_read(st, host) {
+ if ((!show_archived && rrdset_is_available_for_viewers(st)) || (show_archived && rrdset_is_archived(st))) {
+ if(c) buffer_strcat(wb, ",");
+ buffer_strcat(wb, "\n\t\t\"");
+ buffer_strcat(wb, rrdset_id(st));
+ buffer_strcat(wb, "\": ");
+ rrdset2json(st, wb, &dimensions, &memory, skip_volatile);
+
+ c++;
+ st->last_accessed_time = now;
+ }
+ }
+ rrdset_foreach_done(st);
+
+ RRDCALC *rc;
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
+ if(rc->rrdset)
+ alarms++;
+ }
+ foreach_rrdcalc_in_rrdhost_done(rc);
+
+ buffer_sprintf(wb
+ , "\n\t}"
+ ",\n\t\"charts_count\": %zu"
+ ",\n\t\"dimensions_count\": %zu"
+ ",\n\t\"alarms_count\": %zu"
+ ",\n\t\"rrd_memory_bytes\": %zu"
+ ",\n\t\"hosts_count\": %zu"
+ ",\n\t\"hosts\": ["
+ , c
+ , dimensions
+ , alarms
+ , memory
+ , rrd_hosts_available
+ );
+
+ if(unlikely(rrd_hosts_available > 1)) {
+ rrd_rdlock();
+
+ size_t found = 0;
+ RRDHOST *h;
+ rrdhost_foreach_read(h) {
+ if(!rrdhost_should_be_removed(h, host, now) && !rrdhost_flag_check(h, RRDHOST_FLAG_ARCHIVED)) {
+ buffer_sprintf(wb
+ , "%s\n\t\t{"
+ "\n\t\t\t\"hostname\": \"%s\""
+ "\n\t\t}"
+ , (found > 0) ? "," : ""
+ , rrdhost_hostname(h)
+ );
+
+ found++;
+ }
+ }
+
+ rrd_unlock();
+ }
+ else {
+ buffer_sprintf(wb
+ , "\n\t\t{"
+ "\n\t\t\t\"hostname\": \"%s\""
+ "\n\t\t}"
+ , rrdhost_hostname(host)
+ );
+ }
+
+ buffer_sprintf(wb, "\n\t]\n}\n");
+}
+
+// generate collectors list for the api/v1/info call
+
+struct collector {
+ const char *plugin;
+ const char *module;
+};
+
+struct array_printer {
+ int c;
+ BUFFER *wb;
+};
+
+static int print_collector_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) {
+ struct array_printer *ap = (struct array_printer *)data;
+ BUFFER *wb = ap->wb;
+ struct collector *col=(struct collector *) entry;
+ if(ap->c) buffer_strcat(wb, ",");
+ buffer_strcat(wb, "\n\t\t{\n\t\t\t\"plugin\": \"");
+ buffer_strcat(wb, col->plugin);
+ buffer_strcat(wb, "\",\n\t\t\t\"module\": \"");
+ buffer_strcat(wb, col->module);
+ buffer_strcat(wb, "\"\n\t\t}");
+ (ap->c)++;
+ return 0;
+}
+
+void chartcollectors2json(RRDHOST *host, BUFFER *wb) {
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED);
+ RRDSET *st;
+ char name[500];
+
+ time_t now = now_realtime_sec();
+ rrdset_foreach_read(st, host) {
+ if (rrdset_is_available_for_viewers(st)) {
+ struct collector col = {
+ .plugin = rrdset_plugin_name(st),
+ .module = rrdset_module_name(st)
+ };
+            snprintfz(name, sizeof(name) - 1, "%s:%s", col.plugin, col.module);
+ dictionary_set(dict, name, &col, sizeof(struct collector));
+ st->last_accessed_time = now;
+ }
+ }
+ rrdset_foreach_done(st);
+ struct array_printer ap = {
+ .c = 0,
+ .wb = wb
+ };
+ dictionary_walkthrough_read(dict, print_collector_callback, &ap);
+ dictionary_destroy(dict);
+}
diff --git a/web/api/formatters/charts2json.h b/web/api/formatters/charts2json.h
new file mode 100644
index 0000000..d4b04af
--- /dev/null
+++ b/web/api/formatters/charts2json.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_CHARTS2JSON_H
+#define NETDATA_API_FORMATTER_CHARTS2JSON_H
+
+#include "rrd2json.h"
+
+void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived);
+void chartcollectors2json(RRDHOST *host, BUFFER *wb);
+const char* get_release_channel();
+
+#endif //NETDATA_API_FORMATTER_CHARTS2JSON_H
diff --git a/web/api/formatters/csv/Makefile.am b/web/api/formatters/csv/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/csv/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md
new file mode 100644
index 0000000..df7c11e
--- /dev/null
+++ b/web/api/formatters/csv/README.md
@@ -0,0 +1,144 @@
+<!--
+title: "CSV formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/csv/README.md
+-->
+
+# CSV formatter
+
+The CSV formatter presents [results of database queries](/web/api/queries/README.md) in the following formats:
+
+| format|content type|description|
+| :----:|:----------:|:----------|
+| `csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
+| `csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
+| `tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)|
+| `html`|text/html|an html table|
+| `markdown`|text/plain|markdown table|
+
+In all formats, the date and time are in the first column.
+
+The CSV formatter respects the following API `&options=`:
+
+| option|supported|description|
+|:----:|:-------:|:----------|
+| `nonzero`|yes|to return only the dimensions that have at least a non-zero value|
+| `flip`|yes|to return the rows older to newer (the default is newer to older)|
+| `seconds`|yes|to return the date and time as a unix timestamp|
+| `ms`|yes|to return the date and time as a unix timestamp in milliseconds|
+| `percent`|yes|to replace all values with their percentage over the row total|
+| `abs`|yes|to turn all values positive|
+| `null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)|
+
+## Examples
+
+Get the system total bandwidth for all physical network interfaces, over the last hour,
+in 6 rows (one for every 10 minutes), in `csv` format:
+
+Netdata always returns bandwidth in `kilobits`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.net&format=csv&after=-3600&group=sum&points=6&options=abs'
+time,received,sent
+2018-10-26 23:50:00,90214.67847,215137.79762
+2018-10-26 23:40:00,90126.32286,238587.57522
+2018-10-26 23:30:00,86061.22688,213389.23526
+2018-10-26 23:20:00,85590.75164,206129.01608
+2018-10-26 23:10:00,83163.30691,194311.77384
+2018-10-26 23:00:00,85167.29657,197538.07773
+```
+
+---
+
+Get the max RAM used by the SQL server and any cron jobs, over the last hour, in 2 rows (one for every 30
+minutes), in `tsv` format, and format the date and time as unix timestamp:
+
+Netdata always returns memory in `MB`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=apps.mem&format=tsv&after=-3600&group=max&points=2&options=nonzero,seconds&dimensions=sql,cron'
+time sql cron
+1540598400 61.95703 0.25
+1540596600 61.95703 0.25
+```
+
+---
+
+Get an HTML table of the last 4 values (4 seconds) of system CPU utilization:
+
+Netdata always returns CPU utilization as `%`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=html&after=-4&options=nonzero'
+<html>
+<center>
+<table border="0" cellpadding="5" cellspacing="5">
+<tr><td>time</td><td>softirq</td><td>user</td><td>system</td></tr>
+<tr><td>2018-10-27 00:16:07</td><td>0.25</td><td>1</td><td>0.75</td></tr>
+<tr><td>2018-10-27 00:16:06</td><td>0</td><td>1.0025063</td><td>0.5012531</td></tr>
+<tr><td>2018-10-27 00:16:05</td><td>0</td><td>1</td><td>0.75</td></tr>
+<tr><td>2018-10-27 00:16:04</td><td>0</td><td>1.0025063</td><td>0.7518797</td></tr>
+</table>
+</center>
+</html>
+```
+
+This is how it looks when rendered by a web browser:
+
+![image](https://user-images.githubusercontent.com/2662304/47597887-bafbf480-d99c-11e8-864a-d880bb8d2e5b.png)
+
+---
+
+Get a JSON array with the average bandwidth rate of the mysql server, over the last hour, in 6 values
+(one every 10 minutes), and return the date and time in milliseconds:
+
+Netdata always returns bandwidth rates in `kilobits/s`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=csvjsonarray&after=-3600&points=6&group=average&options=abs,ms'
+[
+["time","in","out"],
+[1540599600000,0.7499986,120.2810185],
+[1540599000000,0.7500019,120.2815509],
+[1540598400000,0.7499999,120.2812319],
+[1540597800000,0.7500044,120.2819634],
+[1540597200000,0.7499968,120.2807337],
+[1540596600000,0.7499988,120.2810527]
+]
+```
+
+---
+
+Get the number of processes started per minute, for the last 10 minutes, in `markdown` format:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.forks&format=markdown&after=-600&points=10&group=sum'
+time | started
+:---: |:---:
+2018-10-27 03:52:00| 245.1706149
+2018-10-27 03:51:00| 152.6654636
+2018-10-27 03:50:00| 163.1755789
+2018-10-27 03:49:00| 176.1574766
+2018-10-27 03:48:00| 178.0137076
+2018-10-27 03:47:00| 183.8306543
+2018-10-27 03:46:00| 264.1635621
+2018-10-27 03:45:00| 205.001551
+2018-10-27 03:44:00| 7026.9852167
+2018-10-27 03:43:00| 205.9904794
+```
+
+And this is how it looks when formatted:
+
+| time | started |
+|:--:|:-----:|
+| 2018-10-27 03:52:00 | 245.1706149 |
+| 2018-10-27 03:51:00 | 152.6654636 |
+| 2018-10-27 03:50:00 | 163.1755789 |
+| 2018-10-27 03:49:00 | 176.1574766 |
+| 2018-10-27 03:48:00 | 178.0137076 |
+| 2018-10-27 03:47:00 | 183.8306543 |
+| 2018-10-27 03:46:00 | 264.1635621 |
+| 2018-10-27 03:45:00 | 205.001551 |
+| 2018-10-27 03:44:00 | 7026.9852167 |
+| 2018-10-27 03:43:00 | 205.9904794 |
+
+
diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c
new file mode 100644
index 0000000..603a171
--- /dev/null
+++ b/web/api/formatters/csv/csv.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "csv.h"
+
+void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines) {
+ //info("RRD2CSV(): %s: BEGIN", r->st->id);
+ QUERY_TARGET *qt = r->internal.qt;
+ long c, i;
+ const long used = qt->query.used;
+
+ // print the csv header
+ for(c = 0, i = 0; c < used ; c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(!i) {
+ buffer_strcat(wb, startline);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, "time");
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ }
+ buffer_strcat(wb, separator);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ i++;
+ }
+ buffer_strcat(wb, endline);
+
+ if(format == DATASOURCE_CSV_MARKDOWN) {
+ // print the --- line after header
+ for(c = 0, i = 0; c < used ;c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(!i) {
+ buffer_strcat(wb, startline);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, ":---:");
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ }
+ buffer_strcat(wb, separator);
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ buffer_strcat(wb, ":---:");
+ if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
+ i++;
+ }
+ buffer_strcat(wb, endline);
+ }
+
+ if(!i) {
+ // no dimensions present
+ return;
+ }
+
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if(!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // for each line in the array
+ NETDATA_DOUBLE total = 1;
+ for(i = start; i != end ;i += step) {
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+
+ buffer_strcat(wb, betweenlines);
+ buffer_strcat(wb, startline);
+
+ time_t now = r->t[i];
+
+ if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) {
+ // print the timestamp of the line
+ buffer_rrd_value(wb, (NETDATA_DOUBLE)now);
+ // in ms
+ if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
+ }
+ else {
+ // generate the local date time
+ struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
+            if(!tm) { error("localtime_r() failed."); continue; }
+ buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+ }
+
+ int set_min_max = 0;
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0; c < used ;c++) {
+ NETDATA_DOUBLE n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ set_min_max = 1;
+ }
+
+ // for each dimension
+ for(c = 0; c < used ;c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ buffer_strcat(wb, separator);
+
+ NETDATA_DOUBLE n = cn[c];
+
+ if(co[c] & RRDR_VALUE_EMPTY) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else {
+ if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ n = n * 100 / total;
+
+ if(unlikely(set_min_max)) {
+ r->min = r->max = n;
+ set_min_max = 0;
+ }
+
+ if(n < r->min) r->min = n;
+ if(n > r->max) r->max = n;
+ }
+
+ buffer_rrd_value(wb, n);
+ }
+ }
+
+ buffer_strcat(wb, endline);
+ }
+ //info("RRD2CSV(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/csv/csv.h b/web/api/formatters/csv/csv.h
new file mode 100644
index 0000000..666d4c6
--- /dev/null
+++ b/web/api/formatters/csv/csv.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_CSV_H
+#define NETDATA_API_FORMATTER_CSV_H
+
+#include "web/api/queries/rrdr.h"
+
+void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines);
+
+#include "../rrd2json.h"
+
+#endif //NETDATA_API_FORMATTER_CSV_H
diff --git a/web/api/formatters/json/Makefile.am b/web/api/formatters/json/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/json/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md
new file mode 100644
index 0000000..a0f8108
--- /dev/null
+++ b/web/api/formatters/json/README.md
@@ -0,0 +1,156 @@
+<!--
+title: "JSON formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/json/README.md
+-->
+
+# JSON formatter
+
+The JSON formatter presents [results of database queries](/web/api/queries/README.md) in the following formats:
+
+| format | content type | description|
+|:----:|:----------:|:----------|
+| `json` | application/json | return the query result as a JSON object|
+| `jsonp` | application/json | return the query result as a JSONP javascript callback|
+| `datatable` | application/json | return the query result as a Google `datatable`|
+| `datasource` | application/json | return the query result as a Google Visualization Provider `datasource` javascript callback|
+
+The JSON formatter respects the following API `&options=`:
+
+| option | supported | description|
+|:----:|:-------:|:----------|
+| `google_json` | yes | enable the Google flavor of JSON (using double quotes for strings and the `Date()` function for dates)|
+| `objectrows` | yes | return each row as an object, instead of an array|
+| `nonzero` | yes | to return only the dimensions that have at least a non-zero value|
+| `flip` | yes | to return the rows older to newer (the default is newer to older)|
+| `seconds` | yes | to return the date and time as a unix timestamp|
+| `ms` | yes | to return the date and time as a unix timestamp in milliseconds|
+| `percent` | yes | to replace all values with their percentage over the row total|
+| `abs` | yes | to turn all values positive|
+| `null2zero` | yes | to replace gaps with zeros (the default prints the string `null`)|
+
+## Examples
+
+To show the differences between the formats, the following examples query the same chart
+(which has a single dimension called `active`), changing only the query `format` and its `options`.
+
+> Using `format=json` and `options=`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options='
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ [ 1540644600, 224.2516667],
+ [ 1540644000, 229.29],
+ [ 1540643400, 222.41],
+ [ 1540642800, 226.6816667],
+ [ 1540642200, 246.4083333],
+ [ 1540641600, 241.0966667]
+ ]
+}
+```
+
+> Using `format=json` and `options=objectrows`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ { "time": 1540644600, "active": 224.2516667},
+ { "time": 1540644000, "active": 229.29},
+ { "time": 1540643400, "active": 222.41},
+ { "time": 1540642800, "active": 226.6816667},
+ { "time": 1540642200, "active": 246.4083333},
+ { "time": 1540641600, "active": 241.0966667}
+ ]
+}
+```
+
+> Using `format=json` and `options=objectrows,google_json`
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows,google_json'
+{
+ "labels": ["time", "active"],
+ "data":
+ [
+ { "time": new Date(2018,9,27,12,50,0), "active": 224.2516667},
+ { "time": new Date(2018,9,27,12,40,0), "active": 229.29},
+ { "time": new Date(2018,9,27,12,30,0), "active": 222.41},
+ { "time": new Date(2018,9,27,12,20,0), "active": 226.6816667},
+ { "time": new Date(2018,9,27,12,10,0), "active": 246.4083333},
+ { "time": new Date(2018,9,27,12,0,0), "active": 241.0966667}
+ ]
+}
+```
+
+> Using `format=jsonp` and `options=`
+
+```bash
+curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=jsonp&options='
+callback({
+ "labels": ["time", "active"],
+ "data":
+ [
+ [ 1540645200, 235.885],
+ [ 1540644600, 224.2516667],
+ [ 1540644000, 229.29],
+ [ 1540643400, 222.41],
+ [ 1540642800, 226.6816667],
+ [ 1540642200, 246.4083333]
+ ]
+});
+```
+
+> Using `format=datatable` and `options=`
+
+```bash
+curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datatable&options='
+{
+ "cols":
+ [
+ {"id":"","label":"time","pattern":"","type":"datetime"},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
+ {"id":"","label":"active","pattern":"","type":"number"}
+ ],
+ "rows":
+ [
+ {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
+ {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
+ {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
+ {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
+ {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
+ {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
+ ]
+}
+```
+
+> Using `format=datasource` and `options=`
+
+```bash
+curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datasource&options='
+google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig:'1540645368',table:{
+ "cols":
+ [
+ {"id":"","label":"time","pattern":"","type":"datetime"},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
+ {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
+ {"id":"","label":"active","pattern":"","type":"number"}
+ ],
+ "rows":
+ [
+ {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
+ {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
+ {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
+ {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
+ {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
+ {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
+ ]
+}});
+```
+
+
diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c
new file mode 100644
index 0000000..608150c
--- /dev/null
+++ b/web/api/formatters/json/json.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json.h"
+
+#define JSON_DATES_JS 1
+#define JSON_DATES_TIMESTAMP 2
+
+void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
+ //info("RRD2JSON(): %s: BEGIN", r->st->id);
+ int row_annotations = 0, dates, dates_with_new = 0;
+ char kq[2] = "", // key quote
+ sq[2] = "", // string quote
+ pre_label[101] = "", // before each label
+ post_label[101] = "", // after each label
+ pre_date[101] = "", // the beginning of line, to the date
+ post_date[101] = "", // closing the date
+ pre_value[101] = "", // before each value
+ post_value[101] = "", // after each value
+ post_line[101] = "", // at the end of each row
+ normal_annotation[201] = "", // default row annotation
+ overflow_annotation[201] = "", // overflow row annotation
+ data_begin[101] = "", // between labels and values
+ finish[101] = "", // at the end of everything
+ object_rows_time[101] = "";
+
+ if(datatable) {
+ dates = JSON_DATES_JS;
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+ row_annotations = 1;
+ snprintfz(pre_date, 100, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq);
+ snprintfz(post_date, 100, "%s}", sq);
+ snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq);
+ snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq);
+ snprintfz(pre_value, 100, ",{%sv%s:", kq, kq);
+ strcpy(post_value, "}");
+ strcpy(post_line, "]}");
+ snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq);
+ strcpy(finish, "\n]\n}");
+
+ snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq);
+ snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq);
+
+ buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq);
+ buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq);
+ buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
+ buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
+
+        // remove the objectsrows flag
+        // google's datatable format provides its own row structure
+ if(options & RRDR_OPTION_OBJECTSROWS)
+ options &= ~RRDR_OPTION_OBJECTSROWS;
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ if(options & RRDR_OPTION_GOOGLE_JSON) {
+ dates = JSON_DATES_JS;
+ dates_with_new = 1;
+ }
+ else {
+ dates = JSON_DATES_TIMESTAMP;
+ dates_with_new = 0;
+ }
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ strcpy(pre_date, " { ");
+ else
+ strcpy(pre_date, " [ ");
+ strcpy(pre_label, ",\"");
+ strcpy(post_label, "\"");
+ strcpy(pre_value, ",");
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ strcpy(post_line, "}");
+ else
+ strcpy(post_line, "]");
+ snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq);
+ strcpy(finish, "\n]\n}");
+
+ buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq);
+ buffer_sprintf(wb, "%stime%s", sq, sq);
+
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ snprintfz(object_rows_time, 100, "%stime%s: ", kq, kq);
+
+ }
+
+ size_t pre_value_len = strlen(pre_value);
+ size_t post_value_len = strlen(post_value);
+ size_t pre_label_len = strlen(pre_label);
+ size_t post_label_len = strlen(post_label);
+ size_t pre_date_len = strlen(pre_date);
+ size_t post_date_len = strlen(post_date);
+ size_t post_line_len = strlen(post_line);
+ size_t normal_annotation_len = strlen(normal_annotation);
+ size_t overflow_annotation_len = strlen(overflow_annotation);
+ size_t object_rows_time_len = strlen(object_rows_time);
+
+ // -------------------------------------------------------------------------
+ // print the JSON header
+
+ QUERY_TARGET *qt = r->internal.qt;
+ long c, i;
+ const long used = qt->query.used;
+
+ // print the header lines
+ for(c = 0, i = 0; c < used ; c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ buffer_fast_strcat(wb, pre_label, pre_label_len);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
+ buffer_fast_strcat(wb, post_label, post_label_len);
+ i++;
+ }
+
+ if(!i) {
+ buffer_fast_strcat(wb, pre_label, pre_label_len);
+ buffer_fast_strcat(wb, "no data", 7);
+ buffer_fast_strcat(wb, post_label, post_label_len);
+ }
+ size_t total_number_of_dimensions = i;
+
+ // print the beginning of row data
+ buffer_strcat(wb, data_begin);
+
+ // if all dimensions are hidden, print a null
+ if(!i) {
+ buffer_strcat(wb, finish);
+ return;
+ }
+
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if(!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // pre-allocate a large enough buffer for us
+ // this does not need to be accurate - it is just a hint to avoid multiple realloc().
+ buffer_need_bytes(wb,
+ ( 20 * rrdr_rows(r)) // timestamp + json overhead
+ + ( (pre_value_len + post_value_len + 4) * total_number_of_dimensions * rrdr_rows(r) ) // number
+ );
+
+ // for each line in the array
+ NETDATA_DOUBLE total = 1;
+ for(i = start; i != end ;i += step) {
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+ NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
+
+ time_t now = r->t[i];
+
+ if(dates == JSON_DATES_JS) {
+ // generate the local date time
+ struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
+ if(!tm) { error("localtime_r() failed."); continue; }
+
+ if(likely(i != start)) buffer_fast_strcat(wb, ",\n", 2);
+ buffer_fast_strcat(wb, pre_date, pre_date_len);
+
+ if( options & RRDR_OPTION_OBJECTSROWS )
+ buffer_fast_strcat(wb, object_rows_time, object_rows_time_len);
+
+ if(unlikely(dates_with_new))
+ buffer_fast_strcat(wb, "new ", 4);
+
+ buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ buffer_fast_strcat(wb, post_date, post_date_len);
+
+ if(unlikely(row_annotations)) {
+ // google supports one annotation per row
+ int annotation_found = 0;
+ for(c = 0; c < used ; c++) {
+ if(unlikely(!(r->od[c] & RRDR_DIMENSION_SELECTED))) continue;
+
+ if(unlikely(co[c] & RRDR_VALUE_RESET)) {
+ buffer_fast_strcat(wb, overflow_annotation, overflow_annotation_len);
+ annotation_found = 1;
+ break;
+ }
+ }
+ if(likely(!annotation_found))
+ buffer_fast_strcat(wb, normal_annotation, normal_annotation_len);
+ }
+ }
+ else {
+ // print the timestamp of the line
+ if(likely(i != start))
+ buffer_fast_strcat(wb, ",\n", 2);
+
+ buffer_fast_strcat(wb, pre_date, pre_date_len);
+
+ if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
+ buffer_fast_strcat(wb, object_rows_time, object_rows_time_len);
+
+ buffer_rrd_value(wb, (NETDATA_DOUBLE)r->t[i]);
+
+ // in ms
+ if(unlikely(options & RRDR_OPTION_MILLISECONDS))
+ buffer_fast_strcat(wb, "000", 3);
+
+ buffer_fast_strcat(wb, post_date, post_date_len);
+ }
+
+ int set_min_max = 0;
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0; c < used ;c++) {
+ NETDATA_DOUBLE n;
+ if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
+ n = ar[c];
+ else
+ n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ set_min_max = 1;
+ }
+
+ // for each dimension
+ for(c = 0; c < used ;c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ NETDATA_DOUBLE n;
+ if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
+ n = ar[c];
+ else
+ n = cn[c];
+
+ buffer_fast_strcat(wb, pre_value, pre_value_len);
+
+ if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
+ buffer_sprintf(wb, "%s%s%s: ", kq, string2str(qt->query.array[c].dimension.name), kq);
+
+ if(co[c] & RRDR_VALUE_EMPTY && !(options & RRDR_OPTION_INTERNAL_AR)) {
+ if(unlikely(options & RRDR_OPTION_NULL2ZERO))
+ buffer_fast_strcat(wb, "0", 1);
+ else
+ buffer_fast_strcat(wb, "null", 4);
+ }
+ else {
+ if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ n = n * 100 / total;
+
+ if(unlikely(set_min_max)) {
+ r->min = r->max = n;
+ set_min_max = 0;
+ }
+
+ if(n < r->min) r->min = n;
+ if(n > r->max) r->max = n;
+ }
+
+ buffer_rrd_value(wb, n);
+ }
+
+ buffer_fast_strcat(wb, post_value, post_value_len);
+ }
+
+ buffer_fast_strcat(wb, post_line, post_line_len);
+ }
+
+ buffer_strcat(wb, finish);
+ //info("RRD2JSON(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/json/json.h b/web/api/formatters/json/json.h
new file mode 100644
index 0000000..fb59e5c
--- /dev/null
+++ b/web/api/formatters/json/json.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_JSON_H
+#define NETDATA_API_FORMATTER_JSON_H
+
+#include "../rrd2json.h"
+
+void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable);
+
+#endif //NETDATA_API_FORMATTER_JSON_H
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
new file mode 100644
index 0000000..8b9b752
--- /dev/null
+++ b/web/api/formatters/json_wrapper.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json_wrapper.h"
+
+struct value_output {
+ int c;
+ BUFFER *wb;
+};
+
+static int value_list_output_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) {
+ struct value_output *ap = (struct value_output *)data;
+ BUFFER *wb = ap->wb;
+ char *output = (char *) entry;
+ if(ap->c) buffer_strcat(wb, ",");
+ buffer_strcat(wb, output);
+ (ap->c)++;
+ return 0;
+}
+
+static int fill_formatted_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) {
+ (void)ls;
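+    // store each label as a JSON pair ["name", "value"], keyed by "name:value" so duplicates collapse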
+ DICTIONARY *dict = (DICTIONARY *)data;
+ char n[RRD_ID_LENGTH_MAX * 2 + 2];
+ char output[RRD_ID_LENGTH_MAX * 2 + 8];
+ char v[RRD_ID_LENGTH_MAX * 2 + 1];
+
+ sanitize_json_string(v, (char *)value, RRD_ID_LENGTH_MAX * 2);
+ int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\", \"%s\"]", name, v);
+ snprintfz(n, RRD_ID_LENGTH_MAX * 2, "%s:%s", name, v);
+ dictionary_set(dict, n, output, len + 1);
+
+ return 1;
+}
+
+void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
+ RRDR_GROUPING group_method)
+{
+ QUERY_TARGET *qt = r->internal.qt;
+
+ long rows = rrdr_rows(r);
+ long c, i;
+ const long query_used = qt->query.used;
+
+ //info("JSONWRAPPER(): %s: BEGIN", r->st->id);
+ char kq[2] = "", // key quote
+ sq[2] = ""; // string quote
+
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+
+ buffer_sprintf(wb, "{\n"
+ " %sapi%s: 1,\n"
+ " %sid%s: %s%s%s,\n"
+ " %sname%s: %s%s%s,\n"
+ " %sview_update_every%s: %lld,\n"
+ " %supdate_every%s: %lld,\n"
+ " %sfirst_entry%s: %lld,\n"
+ " %slast_entry%s: %lld,\n"
+ " %sbefore%s: %lld,\n"
+ " %safter%s: %lld,\n"
+ " %sgroup%s: %s%s%s,\n"
+ " %soptions%s: %s"
+ , kq, kq
+ , kq, kq, sq, qt->id, sq
+ , kq, kq, sq, qt->id, sq
+ , kq, kq, (long long)r->update_every
+ , kq, kq, (long long)qt->db.minimum_latest_update_every
+ , kq, kq, (long long)qt->db.first_time_t
+ , kq, kq, (long long)qt->db.last_time_t
+ , kq, kq, (long long)r->before
+ , kq, kq, (long long)r->after
+ , kq, kq, sq, web_client_api_request_v1_data_group_to_string(group_method), sq
+ , kq, kq, sq);
+
+ web_client_api_request_v1_data_options_to_buffer(wb, r->internal.query_options);
+
+ buffer_sprintf(wb, "%s,\n %sdimension_names%s: [", sq, kq, kq);
+
+ for(c = 0, i = 0; c < query_used ; c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
+ buffer_strcat(wb, sq);
+ i++;
+ }
+ if(!i) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ error("QUERY: '%s', RRDR is empty, %zu dimensions, options is 0x%08x", qt->id, r->d, options);
+#endif
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+
+ buffer_sprintf(wb, "],\n"
+ " %sdimension_ids%s: ["
+ , kq, kq);
+
+ for(c = 0, i = 0; c < query_used ; c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.id));
+ buffer_strcat(wb, sq);
+ i++;
+ }
+ if(!i) {
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+ buffer_strcat(wb, "],\n");
+
+ if (r->internal.query_options & RRDR_OPTION_ALL_DIMENSIONS) {
+ buffer_sprintf(wb, " %sfull_dimension_list%s: [", kq, kq);
+
+ char name[RRD_ID_LENGTH_MAX * 2 + 2];
+ char output[RRD_ID_LENGTH_MAX * 2 + 8];
+
+ struct value_output co = {.c = 0, .wb = wb};
+
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (c = 0; c < (long)qt->metrics.used ;c++) {
+ snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ rrdmetric_acquired_id(qt->metrics.array[c]),
+ rrdmetric_acquired_name(qt->metrics.array[c]));
+
+ int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]",
+ rrdmetric_acquired_id(qt->metrics.array[c]),
+ rrdmetric_acquired_name(qt->metrics.array[c]));
+
+ dictionary_set(dict, name, output, len + 1);
+ }
+ dictionary_walkthrough_read(dict, value_list_output_callback, &co);
+ dictionary_destroy(dict);
+
+ co.c = 0;
+ buffer_sprintf(wb, "],\n %sfull_chart_list%s: [", kq, kq);
+ dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (c = 0; c < (long)qt->instances.used ; c++) {
+ RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c];
+
+ snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ rrdinstance_acquired_id(ria),
+ rrdinstance_acquired_name(ria));
+
+ int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]",
+ rrdinstance_acquired_id(ria),
+ rrdinstance_acquired_name(ria));
+
+ dictionary_set(dict, name, output, len + 1);
+ }
+ dictionary_walkthrough_read(dict, value_list_output_callback, &co);
+ dictionary_destroy(dict);
+
+ co.c = 0;
+ buffer_sprintf(wb, "],\n %sfull_chart_labels%s: [", kq, kq);
+ dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (c = 0; c < (long)qt->instances.used ; c++) {
+ RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c];
+ rrdlabels_walkthrough_read(rrdinstance_acquired_labels(ria), fill_formatted_callback, dict);
+ }
+ dictionary_walkthrough_read(dict, value_list_output_callback, &co);
+ dictionary_destroy(dict);
+ buffer_strcat(wb, "],\n");
+ }
+
+ // functions
+ {
+ DICTIONARY *funcs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ RRDINSTANCE_ACQUIRED *ria = NULL;
+ for (c = 0; c < query_used ; c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+ if(qm->link.ria == ria)
+ continue;
+
+ ria = qm->link.ria;
+ chart_functions_to_dict(rrdinstance_acquired_functions(ria), funcs);
+ }
+
+ buffer_sprintf(wb, " %sfunctions%s: [", kq, kq);
+ void *t; (void)t;
+ dfe_start_read(funcs, t) {
+ const char *comma = "";
+ if(t_dfe.counter) comma = ", ";
+ buffer_sprintf(wb, "%s%s%s%s", comma, sq, t_dfe.name, sq);
+ }
+ dfe_done(t);
+ dictionary_destroy(funcs);
+ buffer_strcat(wb, "],\n");
+ }
+
+ // context query
+ if (!qt->request.st) {
+ buffer_sprintf(
+ wb,
+ " %schart_ids%s: [",
+ kq, kq);
+
+ for (c = 0, i = 0; c < query_used; c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+
+ if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN))
+ continue;
+
+ if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
+ continue;
+
+ if (i)
+ buffer_strcat(wb, ", ");
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, string2str(qm->chart.id));
+ buffer_strcat(wb, sq);
+ i++;
+ }
+ if (!i) {
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+ buffer_strcat(wb, "],\n");
+ if (qt->instances.chart_label_key_pattern) {
+ buffer_sprintf(wb, " %schart_labels%s: { ", kq, kq);
+
+ SIMPLE_PATTERN *pattern = qt->instances.chart_label_key_pattern;
+ char *label_key = NULL;
+ int keys = 0;
+ while (pattern && (label_key = simple_pattern_iterate(&pattern))) {
+ if (keys)
+ buffer_strcat(wb, ", ");
+ buffer_sprintf(wb, "%s%s%s : [", kq, label_key, kq);
+ keys++;
+
+ for (c = 0, i = 0; c < query_used; c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+
+ if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN))
+ continue;
+ if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
+ continue;
+
+ if (i)
+ buffer_strcat(wb, ", ");
+ rrdlabels_get_value_to_buffer_or_null(rrdinstance_acquired_labels(qm->link.ria), wb, label_key, sq, "null");
+ i++;
+ }
+ if (!i) {
+ rows = 0;
+ buffer_strcat(wb, sq);
+ buffer_strcat(wb, "no data");
+ buffer_strcat(wb, sq);
+ }
+ buffer_strcat(wb, "]");
+ }
+ buffer_strcat(wb, "},\n");
+ }
+ }
+
+ buffer_sprintf(wb, " %slatest_values%s: ["
+ , kq, kq);
+
+ for(c = 0, i = 0; c < query_used ;c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ i++;
+
+ NETDATA_DOUBLE value = rrdmetric_acquired_last_stored_value(qm->link.rma);
+ if (isnan(value)) // (NAN == value) is always false in IEEE 754; use isnan() to detect missing values
+ buffer_strcat(wb, "null");
+ else
+ buffer_rrd_value(wb, value);
+ }
+ if(!i) {
+ rows = 0;
+ buffer_strcat(wb, "null");
+ }
+
+ buffer_sprintf(wb, "],\n"
+ " %sview_latest_values%s: ["
+ , kq, kq);
+
+ i = 0;
+ if(rows) {
+ NETDATA_DOUBLE total = 1;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for(c = 0; c < query_used ;c++) {
+ NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ NETDATA_DOUBLE n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ }
+
+ for(c = 0, i = 0; c < query_used ;c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(i) buffer_strcat(wb, ", ");
+ i++;
+
+ NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ];
+ NETDATA_DOUBLE n = cn[c];
+
+ if(co[c] & RRDR_VALUE_EMPTY) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else {
+ if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE))
+ n = n * 100 / total;
+
+ buffer_rrd_value(wb, n);
+ }
+ }
+ }
+ if(!i) {
+ rows = 0;
+ buffer_strcat(wb, "null");
+ }
+
+ buffer_sprintf(wb, "],\n"
+ " %sdimensions%s: %ld,\n"
+ " %spoints%s: %ld,\n"
+ " %sformat%s: %s"
+ , kq, kq, i
+ , kq, kq, rows
+ , kq, kq, sq
+ );
+
+ rrdr_buffer_print_format(wb, format);
+
+ buffer_sprintf(wb, "%s,\n"
+ " %sdb_points_per_tier%s: [ "
+ , sq
+ , kq, kq
+ );
+
+ for(size_t tier = 0; tier < storage_tiers ; tier++)
+ buffer_sprintf(wb, "%s%zu", tier>0?", ":"", r->internal.tier_points_read[tier]);
+
+ buffer_strcat(wb, " ]");
+
+ buffer_sprintf(wb, ",\n %sresult%s: ", kq, kq);
+
+ if(string_value) buffer_strcat(wb, sq);
+ //info("JSONWRAPPER(): %s: END", r->st->id);
+}
+
+void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
+ (void)r;
+ (void)format;
+
+ char kq[2] = "", // key quote
+ sq[2] = ""; // string quote
+
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+
+ if(string_value) buffer_strcat(wb, sq);
+
+ buffer_sprintf(wb, ",\n %sanomaly_rates%s: ", kq, kq);
+}
+
+void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
+ (void)format;
+
+ char kq[2] = "", // key quote
+ sq[2] = ""; // string quote
+
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+
+ if(string_value) buffer_strcat(wb, sq);
+
+ buffer_sprintf(wb, ",\n %smin%s: ", kq, kq);
+ buffer_rrd_value(wb, r->min);
+ buffer_sprintf(wb, ",\n %smax%s: ", kq, kq);
+ buffer_rrd_value(wb, r->max);
+ buffer_strcat(wb, "\n}\n");
+}
diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h
new file mode 100644
index 0000000..91c1475
--- /dev/null
+++ b/web/api/formatters/json_wrapper.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_JSON_WRAPPER_H
+#define NETDATA_API_FORMATTER_JSON_WRAPPER_H
+
+#include "rrd2json.h"
+#include "web/api/queries/query.h"
+
+
+void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
+ RRDR_GROUPING group_method);
+void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
+void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
+
+#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H
diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c
new file mode 100644
index 0000000..8bf5471
--- /dev/null
+++ b/web/api/formatters/rrd2json.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web/api/web_api_v1.h"
+#include "database/storage_engine.h"
+
+void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) {
+ rrdset2json(st, wb, NULL, NULL, 0);
+}
+
+void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) {
+ switch(format) {
+ case DATASOURCE_JSON:
+ buffer_strcat(wb, DATASOURCE_FORMAT_JSON);
+ break;
+
+ case DATASOURCE_DATATABLE_JSON:
+ buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSON);
+ break;
+
+ case DATASOURCE_DATATABLE_JSONP:
+ buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSONP);
+ break;
+
+ case DATASOURCE_JSONP:
+ buffer_strcat(wb, DATASOURCE_FORMAT_JSONP);
+ break;
+
+ case DATASOURCE_SSV:
+ buffer_strcat(wb, DATASOURCE_FORMAT_SSV);
+ break;
+
+ case DATASOURCE_CSV:
+ buffer_strcat(wb, DATASOURCE_FORMAT_CSV);
+ break;
+
+ case DATASOURCE_TSV:
+ buffer_strcat(wb, DATASOURCE_FORMAT_TSV);
+ break;
+
+ case DATASOURCE_HTML:
+ buffer_strcat(wb, DATASOURCE_FORMAT_HTML);
+ break;
+
+ case DATASOURCE_JS_ARRAY:
+ buffer_strcat(wb, DATASOURCE_FORMAT_JS_ARRAY);
+ break;
+
+ case DATASOURCE_SSV_COMMA:
+ buffer_strcat(wb, DATASOURCE_FORMAT_SSV_COMMA);
+ break;
+
+ default:
+ buffer_strcat(wb, "unknown");
+ break;
+ }
+}
+
+int rrdset2value_api_v1(
+ RRDSET *st
+ , BUFFER *wb
+ , NETDATA_DOUBLE *n
+ , const char *dimensions
+ , size_t points
+ , time_t after
+ , time_t before
+ , RRDR_GROUPING group_method
+ , const char *group_options
+ , time_t resampling_time
+ , uint32_t options
+ , time_t *db_after
+ , time_t *db_before
+ , size_t *db_points_read
+ , size_t *db_points_per_tier
+ , size_t *result_points_generated
+ , int *value_is_null
+ , NETDATA_DOUBLE *anomaly_rate
+ , time_t timeout
+ , size_t tier
+ , QUERY_SOURCE query_source
+) {
+ int ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
+
+ ONEWAYALLOC *owa = onewayalloc_create(0);
+ RRDR *r = rrd2rrdr_legacy(
+ owa,
+ st,
+ points,
+ after,
+ before,
+ group_method,
+ resampling_time,
+ options,
+ dimensions,
+ group_options,
+ timeout,
+ tier,
+ query_source);
+
+ if(!r) {
+ if(value_is_null) *value_is_null = 1;
+ ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
+ goto cleanup;
+ }
+
+ if(db_points_read)
+ *db_points_read += r->internal.db_points_read;
+
+ if(db_points_per_tier) {
+ for(size_t t = 0; t < storage_tiers ;t++)
+ db_points_per_tier[t] += r->internal.tier_points_read[t];
+ }
+
+ if(result_points_generated)
+ *result_points_generated += r->internal.result_points_generated;
+
+ if(rrdr_rows(r) == 0) {
+ if(db_after) *db_after = 0;
+ if(db_before) *db_before = 0;
+ if(value_is_null) *value_is_null = 1;
+
+ ret = HTTP_RESP_BAD_REQUEST;
+ goto cleanup;
+ }
+
+ if(wb) {
+ if (r->result_options & RRDR_RESULT_OPTION_RELATIVE)
+ buffer_no_cacheable(wb);
+ else if (r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
+ buffer_cacheable(wb);
+ }
+
+ if(db_after) *db_after = r->after;
+ if(db_before) *db_before = r->before;
+
+ long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0;
+ *n = rrdr2value(r, i, options, value_is_null, anomaly_rate);
+ ret = HTTP_RESP_OK;
+
+cleanup:
+ rrdr_free(owa, r);
+ onewayalloc_destroy(owa);
+ return ret;
+}
+
+int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *latest_timestamp) {
+
+ RRDR *r = rrd2rrdr(owa, qt);
+ if(!r) {
+ buffer_strcat(wb, "Cannot generate output with these parameters on this chart.");
+ return HTTP_RESP_INTERNAL_SERVER_ERROR;
+ }
+
+ if (r->result_options & RRDR_RESULT_OPTION_CANCEL) {
+ rrdr_free(owa, r);
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+ }
+
+ if(r->result_options & RRDR_RESULT_OPTION_RELATIVE)
+ buffer_no_cacheable(wb);
+ else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
+ buffer_cacheable(wb);
+
+ if(latest_timestamp && rrdr_rows(r) > 0)
+ *latest_timestamp = r->before;
+
+ DATASOURCE_FORMAT format = qt->request.format;
+ RRDR_OPTIONS options = qt->request.options;
+ RRDR_GROUPING group_method = qt->request.group_method;
+
+ switch(format) {
+ case DATASOURCE_SSV:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2ssv(r, wb, options, "", " ", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2ssv(r, wb, options, "", " ", "");
+ }
+ break;
+
+ case DATASOURCE_SSV_COMMA:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2ssv(r, wb, options, "", ",", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2ssv(r, wb, options, "", ",", "");
+ }
+ break;
+
+ case DATASOURCE_JS_ARRAY:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ rrdr2ssv(r, wb, options, "[", ",", "]");
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ }
+ else {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr2ssv(r, wb, options, "[", ",", "]");
+ }
+ break;
+
+ case DATASOURCE_CSV:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2csv(r, wb, format, options, "", ",", "\\n", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2csv(r, wb, format, options, "", ",", "\r\n", "");
+ }
+ break;
+
+ case DATASOURCE_CSV_MARKDOWN:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2csv(r, wb, format, options, "", "|", "\\n", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2csv(r, wb, format, options, "", "|", "\r\n", "");
+ }
+ break;
+
+ case DATASOURCE_CSV_JSON_ARRAY:
+ wb->contenttype = CT_APPLICATION_JSON;
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ buffer_strcat(wb, "[\n");
+ rrdr2csv(r, wb, format, options | RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); // OR the flag in; '+' would corrupt the bitmask if the flag were already set
+ buffer_strcat(wb, "\n]");
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ }
+ else {
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_strcat(wb, "[\n");
+ rrdr2csv(r, wb, format, options | RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); // OR the flag in; '+' would corrupt the bitmask if the flag were already set
+ buffer_strcat(wb, "\n]");
+ }
+ break;
+
+ case DATASOURCE_TSV:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2csv(r, wb, format, options, "", "\t", "\\n", "");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_PLAIN;
+ rrdr2csv(r, wb, format, options, "", "\t", "\r\n", "");
+ }
+ break;
+
+ case DATASOURCE_HTML:
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wb->contenttype = CT_APPLICATION_JSON;
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n");
+ rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "");
+ buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n");
+ rrdr_json_wrapper_end(r, wb, format, options, 1);
+ }
+ else {
+ wb->contenttype = CT_TEXT_HTML;
+ buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n");
+ rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", "");
+ buffer_strcat(wb, "</table>\n</center>\n</html>\n");
+ }
+ break;
+
+ case DATASOURCE_DATATABLE_JSONP:
+ wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+
+ rrdr2json(r, wb, options, 1);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+
+ case DATASOURCE_DATATABLE_JSON:
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+
+ rrdr2json(r, wb, options, 1);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+
+ case DATASOURCE_JSONP:
+ wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+
+ rrdr2json(r, wb, options, 0);
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ break;
+
+ case DATASOURCE_JSON:
+ default:
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ if(options & RRDR_OPTION_JSON_WRAP)
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+
+ rrdr2json(r, wb, options, 0);
+
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ if(options & RRDR_OPTION_RETURN_JWAR) {
+ rrdr_json_wrapper_anomaly_rates(r, wb, format, options, 0);
+ rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, 0);
+ }
+ rrdr_json_wrapper_end(r, wb, format, options, 0);
+ }
+ break;
+ }
+
+ rrdr_free(owa, r);
+ return HTTP_RESP_OK;
+}
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
new file mode 100644
index 0000000..048281d
--- /dev/null
+++ b/web/api/formatters/rrd2json.h
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRD2JSON_H
+#define NETDATA_RRD2JSON_H 1
+
+#include "web/api/web_api_v1.h"
+
+#include "web/api/exporters/allmetrics.h"
+#include "web/api/queries/rrdr.h"
+
+#include "web/api/formatters/csv/csv.h"
+#include "web/api/formatters/ssv/ssv.h"
+#include "web/api/formatters/json/json.h"
+#include "web/api/formatters/value/value.h"
+
+#include "web/api/formatters/rrdset2json.h"
+#include "web/api/formatters/charts2json.h"
+#include "web/api/formatters/json_wrapper.h"
+
+#include "web/server/web_client.h"
+
+#define HOSTNAME_MAX 1024
+
+#define API_RELATIVE_TIME_MAX (3 * 365 * 86400)
+
+// type of JSON generations
+typedef enum {
+ DATASOURCE_JSON = 0,
+ DATASOURCE_DATATABLE_JSON = 1,
+ DATASOURCE_DATATABLE_JSONP = 2,
+ DATASOURCE_SSV = 3,
+ DATASOURCE_CSV = 4,
+ DATASOURCE_JSONP = 5,
+ DATASOURCE_TSV = 6,
+ DATASOURCE_HTML = 7,
+ DATASOURCE_JS_ARRAY = 8,
+ DATASOURCE_SSV_COMMA = 9,
+ DATASOURCE_CSV_JSON_ARRAY = 10,
+ DATASOURCE_CSV_MARKDOWN = 11,
+} DATASOURCE_FORMAT;
+
+#define DATASOURCE_FORMAT_JSON "json"
+#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable"
+#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource"
+#define DATASOURCE_FORMAT_JSONP "jsonp"
+#define DATASOURCE_FORMAT_SSV "ssv"
+#define DATASOURCE_FORMAT_CSV "csv"
+#define DATASOURCE_FORMAT_TSV "tsv"
+#define DATASOURCE_FORMAT_HTML "html"
+#define DATASOURCE_FORMAT_JS_ARRAY "array"
+#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma"
+#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray"
+#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown"
+
+void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb);
+void rrdr_buffer_print_format(BUFFER *wb, uint32_t format);
+
+int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp);
+
+int rrdset2value_api_v1(
+ RRDSET *st
+ , BUFFER *wb
+ , NETDATA_DOUBLE *n
+ , const char *dimensions
+ , size_t points
+ , time_t after
+ , time_t before
+ , RRDR_GROUPING group_method
+ , const char *group_options
+ , time_t resampling_time
+ , uint32_t options
+ , time_t *db_after
+ , time_t *db_before
+ , size_t *db_points_read
+ , size_t *db_points_per_tier
+ , size_t *result_points_generated
+ , int *value_is_null
+ , NETDATA_DOUBLE *anomaly_rate
+ , time_t timeout
+ , size_t tier
+ , QUERY_SOURCE query_source
+);
+
+#endif /* NETDATA_RRD2JSON_H */
diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c
new file mode 100644
index 0000000..1e81063
--- /dev/null
+++ b/web/api/formatters/rrdset2json.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdset2json.h"
+
+void chart_labels2json(RRDSET *st, BUFFER *wb, size_t indentation)
+{
+ if(unlikely(!st->rrdlabels))
+ return;
+
+ char tabs[21]; // up to 10 indentation levels of "\t\t" (2 chars each), plus the terminating null
+
+ if (indentation > 10)
+ indentation = 10;
+
+ tabs[0] = '\0';
+ while (indentation) {
+ strcat(tabs, "\t\t");
+ indentation--;
+ }
+
+ rrdlabels_to_buffer(st->rrdlabels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
+ buffer_strcat(wb, "\n");
+}
+
+// generate JSON for the /api/v1/chart API call
+
+void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile) {
+ time_t first_entry_t = rrdset_first_entry_t(st);
+ time_t last_entry_t = rrdset_last_entry_t(st);
+
+ buffer_sprintf(
+ wb,
+ "\t\t{\n"
+ "\t\t\t\"id\": \"%s\",\n"
+ "\t\t\t\"name\": \"%s\",\n"
+ "\t\t\t\"type\": \"%s\",\n"
+ "\t\t\t\"family\": \"%s\",\n"
+ "\t\t\t\"context\": \"%s\",\n"
+ "\t\t\t\"title\": \"%s (%s)\",\n"
+ "\t\t\t\"priority\": %ld,\n"
+ "\t\t\t\"plugin\": \"%s\",\n"
+ "\t\t\t\"module\": \"%s\",\n"
+ "\t\t\t\"units\": \"%s\",\n"
+ "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n"
+ "\t\t\t\"chart_type\": \"%s\",\n",
+ rrdset_id(st),
+ rrdset_name(st),
+ rrdset_parts_type(st),
+ rrdset_family(st),
+ rrdset_context(st),
+ rrdset_title(st),
+ rrdset_name(st),
+ st->priority,
+ rrdset_plugin_name(st),
+ rrdset_module_name(st),
+ rrdset_units(st),
+ rrdset_name(st),
+ rrdset_type_name(st->chart_type));
+
+ if (likely(!skip_volatile))
+ buffer_sprintf(
+ wb,
+ "\t\t\t\"duration\": %"PRId64",\n",
+ (int64_t)(last_entry_t - first_entry_t + st->update_every) //st->entries * st->update_every
+ );
+
+ buffer_sprintf(
+ wb,
+ "\t\t\t\"first_entry\": %"PRId64",\n",
+ (int64_t)first_entry_t //rrdset_first_entry_t(st)
+ );
+
+ if (likely(!skip_volatile))
+ buffer_sprintf(
+ wb,
+ "\t\t\t\"last_entry\": %"PRId64",\n",
+ (int64_t)last_entry_t //rrdset_last_entry_t(st)
+ );
+
+ buffer_sprintf(
+ wb,
+ "\t\t\t\"update_every\": %d,\n"
+ "\t\t\t\"dimensions\": {\n",
+ st->update_every);
+
+ unsigned long memory = sizeof(RRDSET) + st->memsize;
+
+ size_t dimensions = 0;
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue;
+
+ memory += sizeof(RRDDIM) + rd->memsize;
+
+ if (dimensions)
+ buffer_strcat(wb, ",\n\t\t\t\t\"");
+ else
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat_jsonescape(wb, rrddim_id(rd));
+ buffer_strcat(wb, "\": { \"name\": \"");
+ buffer_strcat_jsonescape(wb, rrddim_name(rd));
+ buffer_strcat(wb, "\" }");
+
+ dimensions++;
+ }
+ rrddim_foreach_done(rd);
+
+ if(dimensions_count) *dimensions_count += dimensions;
+ if(memory_used) *memory_used += memory;
+
+ buffer_sprintf(wb, "\n\t\t\t},\n\t\t\t\"chart_variables\": ");
+ health_api_v1_chart_custom_variables2json(st, wb);
+
+ buffer_strcat(wb, ",\n\t\t\t\"green\": ");
+ buffer_rrd_value(wb, st->green);
+ buffer_strcat(wb, ",\n\t\t\t\"red\": ");
+ buffer_rrd_value(wb, st->red);
+
+ if (likely(!skip_volatile)) {
+ buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n");
+ size_t alarms = 0;
+ RRDCALC *rc;
+ netdata_rwlock_rdlock(&st->alerts.rwlock);
+ DOUBLE_LINKED_LIST_FOREACH_FORWARD(st->alerts.base, rc, prev, next) {
+ buffer_sprintf(
+ wb,
+ "%s"
+ "\t\t\t\t\"%s\": {\n"
+ "\t\t\t\t\t\"id\": %u,\n"
+ "\t\t\t\t\t\"status\": \"%s\",\n"
+ "\t\t\t\t\t\"units\": \"%s\",\n"
+ "\t\t\t\t\t\"update_every\": %d\n"
+ "\t\t\t\t}",
+ (alarms) ? ",\n" : "", rrdcalc_name(rc), rc->id, rrdcalc_status2string(rc->status), rrdcalc_units(rc),
+ rc->update_every);
+
+ alarms++;
+ }
+ netdata_rwlock_unlock(&st->alerts.rwlock);
+ buffer_sprintf(wb,
+ "\n\t\t\t}"
+ );
+ }
+ buffer_strcat(wb, ",\n\t\t\t\"chart_labels\": {\n");
+ chart_labels2json(st, wb, 2);
+ buffer_strcat(wb, "\t\t\t}");
+
+ buffer_strcat(wb, ",\n\t\t\t\"functions\": {\n");
+ chart_functions2json(st, wb, 4, "\"", "\"");
+ buffer_strcat(wb, "\t\t\t}");
+
+ buffer_sprintf(wb,
+ "\n\t\t}"
+ );
+}
diff --git a/web/api/formatters/rrdset2json.h b/web/api/formatters/rrdset2json.h
new file mode 100644
index 0000000..b2908e2
--- /dev/null
+++ b/web/api/formatters/rrdset2json.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_RRDSET2JSON_H
+#define NETDATA_API_FORMATTER_RRDSET2JSON_H
+
+#include "rrd2json.h"
+
+void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile);
+
+#endif //NETDATA_API_FORMATTER_RRDSET2JSON_H
diff --git a/web/api/formatters/ssv/Makefile.am b/web/api/formatters/ssv/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/ssv/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md
new file mode 100644
index 0000000..d9e193d
--- /dev/null
+++ b/web/api/formatters/ssv/README.md
@@ -0,0 +1,59 @@
+<!--
+title: "SSV formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/ssv/README.md
+-->
+
+# SSV formatter
+
+The SSV formatter sums all dimensions in [results of database queries](/web/api/queries/README.md)
+into a single value per point and returns a list of such values, showing how the sum changes through time.
+
+It supports the following formats:
+
+| format | content type | description |
+|:----:|:----------:|:----------|
+| `ssv` | text/plain | a space separated list of values |
+| `ssvcomma` | text/plain | a comma separated list of values |
+| `array` | application/json | a JSON array |
+
+The SSV formatter respects the following API `&options=`:
+
+| option | supported | description |
+| :----:|:-------:|:----------|
+| `nonzero` | yes | to return only the dimensions that have at least one non-zero value |
+| `flip` | yes | to return the numbers older to newer (the default is newer to older) |
+| `percent` | yes | to replace all values with their percentage over the row total |
+| `abs` | yes | to turn all values positive, before using them |
+| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions) |
+
+## Examples
+
+Get the average system CPU utilization of the last hour, in 6 values (one every 10 minutes):
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=ssv&after=-3600&points=6&group=average'
+1.741352 1.6800467 1.769411 1.6761112 1.629862 1.6807968
+```
+
+---
+
+Get the total mysql bandwidth (in + out) for the last hour, in 6 values (one every 10 minutes):
+
+Netdata returns bandwidth in `kilobits`.
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=ssvcomma&after=-3600&points=6&group=sum&options=abs'
+72618.7936215,72618.778889,72618.788084,72618.9195918,72618.7760612,72618.6712421
+```
+
+---
+
+Get the web server max connections for the last hour, in 12 values (one every 5 minutes)
+in a JSON array:
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&format=array&after=-3600&points=12&group=max'
+[278,258,268,239,259,260,243,266,278,318,264,258]
+```
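+
+---
+
+The `&options=` parameter combines with these formats. As an illustrative sketch (the numbers below are made up), this runs the same CPU query as above, but each value is the delta from the minimum to the maximum dimension value of that point (`min2max`):
+
+```bash
+# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=ssv&after=-3600&points=6&group=average&options=min2max'
+2.1 1.9 2.4 2.0 1.8 2.2
+```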
+
+
diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c
new file mode 100644
index 0000000..d561980
--- /dev/null
+++ b/web/api/formatters/ssv/ssv.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ssv.h"
+
+void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix) {
+ //info("RRD2SSV(): %s: BEGIN", r->st->id);
+ long i;
+
+ buffer_strcat(wb, prefix);
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if(!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // for each line in the array
+ for(i = start; i != end ;i += step) {
+ int all_values_are_null = 0;
+ NETDATA_DOUBLE v = rrdr2value(r, i, options, &all_values_are_null, NULL);
+
+ if(likely(i != start)) {
+ if(r->min > v) r->min = v;
+ if(r->max < v) r->max = v;
+ }
+ else {
+ r->min = v;
+ r->max = v;
+ }
+
+ if(likely(i != start))
+ buffer_strcat(wb, separator);
+
+ if(all_values_are_null) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_strcat(wb, "0");
+ else
+ buffer_strcat(wb, "null");
+ }
+ else
+ buffer_rrd_value(wb, v);
+ }
+ buffer_strcat(wb, suffix);
+ //info("RRD2SSV(): %s: END", r->st->id);
+}
diff --git a/web/api/formatters/ssv/ssv.h b/web/api/formatters/ssv/ssv.h
new file mode 100644
index 0000000..f7d4a95
--- /dev/null
+++ b/web/api/formatters/ssv/ssv.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_SSV_H
+#define NETDATA_API_FORMATTER_SSV_H
+
+#include "../rrd2json.h"
+
+void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix);
+
+#endif //NETDATA_API_FORMATTER_SSV_H
diff --git a/web/api/formatters/value/Makefile.am b/web/api/formatters/value/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/formatters/value/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md
new file mode 100644
index 0000000..a51e32d
--- /dev/null
+++ b/web/api/formatters/value/README.md
@@ -0,0 +1,24 @@
+<!--
+title: "Value formatter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/value/README.md
+-->
+
+# Value formatter
+
+The Value formatter presents [results of database queries](/web/api/queries/README.md) as a single value.
+
+To calculate the single value to be returned, it sums the values of all dimensions.
+
+The Value formatter respects the following API `&options=`:
+
+| option | supported | description |
+|:----: |:-------: |:---------- |
+| `percent` | yes | to replace all values with their percentage over the row total|
+| `abs` | yes | to turn all values positive, before using them |
+| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions)|
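+
+As a worked example of these options, assume a row with dimension values `3`, `-5` and `2`:
+the plain sum is `0`; with `abs` the values become `3`, `5` and `2` and the sum is `10`;
+with `min2max` the result is the delta from the minimum to the maximum value, `3 - (-5) = 8`.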
+
+The Value formatter is not exposed by the API by itself.
+Instead, it is used by the [`ssv`](/web/api/formatters/ssv/README.md) formatter
+and by [health monitoring queries](/health/README.md).
+
+
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c
new file mode 100644
index 0000000..46a7130
--- /dev/null
+++ b/web/api/formatters/value/value.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "value.h"
+
+
+inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate) {
+ QUERY_TARGET *qt = r->internal.qt;
+ long c;
+ const long used = qt->query.used;
+
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+ NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
+
+ NETDATA_DOUBLE sum = 0, min = 0, max = 0, v;
+ int all_null = 1, init = 1;
+
+ NETDATA_DOUBLE total = 1;
+ NETDATA_DOUBLE total_anomaly_rate = 0;
+
+ int set_min_max = 0;
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ total = 0;
+ for (c = 0; c < used; c++) {
+ NETDATA_DOUBLE n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ total += n;
+ }
+ // prevent a division by zero
+ if(total == 0) total = 1;
+ set_min_max = 1;
+ }
+
+ // for each dimension
+ for (c = 0; c < used; c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ NETDATA_DOUBLE n = cn[c];
+
+ if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
+ n = -n;
+
+ if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
+ n = n * 100 / total;
+
+ if(unlikely(set_min_max)) {
+ r->min = r->max = n;
+ set_min_max = 0;
+ }
+
+ if(n < r->min) r->min = n;
+ if(n > r->max) r->max = n;
+ }
+
+ if(unlikely(init)) {
+ if(n > 0) {
+ min = 0;
+ max = n;
+ }
+ else {
+ min = n;
+ max = 0;
+ }
+ init = 0;
+ }
+
+ if(likely(!(co[c] & RRDR_VALUE_EMPTY))) {
+ all_null = 0;
+ sum += n;
+ }
+
+ if(n < min) min = n;
+ if(n > max) max = n;
+
+ total_anomaly_rate += ar[c];
+ }
+
+ if(anomaly_rate) {
+ if(!r->d) *anomaly_rate = 0;
+ else *anomaly_rate = total_anomaly_rate / (NETDATA_DOUBLE)r->d;
+ }
+
+ if(unlikely(all_null)) {
+ if(likely(all_values_are_null))
+ *all_values_are_null = 1;
+ return 0;
+ }
+ else {
+ if(likely(all_values_are_null))
+ *all_values_are_null = 0;
+ }
+
+ if(options & RRDR_OPTION_MIN2MAX)
+ v = max - min;
+ else
+ v = sum;
+
+ return v;
+}
+
+QUERY_VALUE rrdmetric2value(RRDHOST *host,
+ struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier, time_t timeout, QUERY_SOURCE query_source
+) {
+ QUERY_TARGET_REQUEST qtr = {
+ .host = host,
+ .rca = rca,
+ .ria = ria,
+ .rma = rma,
+ .after = after,
+ .before = before,
+ .points = 1,
+ .options = options,
+ .group_method = group_method,
+ .group_options = group_options,
+ .tier = tier,
+ .timeout = timeout,
+ .query_source = query_source,
+ };
+
+ ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
+ RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+
+ QUERY_VALUE qv;
+
+ if(!r || rrdr_rows(r) == 0) {
+ qv = (QUERY_VALUE) {
+ .value = NAN,
+ .anomaly_rate = NAN,
+ };
+ }
+ else {
+ qv = (QUERY_VALUE) {
+ .after = r->after,
+ .before = r->before,
+ .points_read = r->internal.db_points_read,
+ .result_points = r->internal.result_points_generated,
+ };
+
+ for(size_t t = 0; t < storage_tiers ;t++)
+ qv.storage_points_per_tier[t] = r->internal.tier_points_read[t];
+
+ long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0;
+ int all_values_are_null = 0;
+ qv.value = rrdr2value(r, i, options, &all_values_are_null, &qv.anomaly_rate);
+ if(all_values_are_null) {
+ qv.value = NAN;
+ qv.anomaly_rate = NAN;
+ }
+ }
+
+ rrdr_free(owa, r);
+ onewayalloc_destroy(owa);
+
+ return qv;
+}
diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h
new file mode 100644
index 0000000..76b1869
--- /dev/null
+++ b/web/api/formatters/value/value.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_FORMATTER_VALUE_H
+#define NETDATA_API_FORMATTER_VALUE_H
+
+#include "../rrd2json.h"
+
+typedef struct storage_value {
+ NETDATA_DOUBLE value;
+ NETDATA_DOUBLE anomaly_rate;
+ time_t after;
+ time_t before;
+ size_t points_read;
+ size_t storage_points_per_tier[RRD_STORAGE_TIERS];
+ size_t result_points;
+} QUERY_VALUE;
+
+struct rrdmetric_acquired;
+struct rrdinstance_acquired;
+struct rrdcontext_acquired;
+
+QUERY_VALUE rrdmetric2value(RRDHOST *host,
+ struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier, time_t timeout, QUERY_SOURCE query_source
+);
+
+NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate);
+
+#endif //NETDATA_API_FORMATTER_VALUE_H
diff --git a/web/api/health/Makefile.am b/web/api/health/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/health/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/health/README.md b/web/api/health/README.md
new file mode 100644
index 0000000..9ec8f31
--- /dev/null
+++ b/web/api/health/README.md
@@ -0,0 +1,225 @@
+<!--
+title: "Health API Calls"
+date: 2020-04-27
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/health/README.md
+-->
+
+# Health API Calls
+
+## Health Read API
+
+### Enabled Alarms
+
+Netdata enables alarms on demand, i.e. when the chart they should be linked to starts collecting data. So, although many
+more alarms may be configured, only the ones relevant to the data actually being collected are enabled.
+
+To get the list of all enabled alarms, open your browser and navigate to `http://NODE:19999/api/v1/alarms?all`,
+replacing `NODE` with the IP address or hostname for your Agent dashboard.
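+
+For example, assuming `curl` and `jq` are available, you can fetch the list and count the enabled alarms like this:
+
+```sh
+curl -s "http://NODE:19999/api/v1/alarms?all" | jq '.alarms | length'
+```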
+
+### Raised Alarms
+
+This API call will return the alarms currently in WARNING or CRITICAL state.
+
+`http://NODE:19999/api/v1/alarms`
+
+### Event Log
+
+The size of the alarm log is configured in `netdata.conf`. There are two settings: the rotation of the alarm log file and the in-memory size of the alarm log.
+
+```
+[health]
+ in memory max health log entries = 1000
+ rotate log every lines = 2000
+```
+
+The API call retrieves all entries of the alarm log:
+
+`http://NODE:19999/api/v1/alarm_log`
+
+### Alarm Log Incremental Updates
+
+`http://NODE:19999/api/v1/alarm_log?after=UNIQUEID`
+
+The above returns all the events in the alarm log that occurred after `UNIQUEID`. Poll it once without `after=`, remember the last `UNIQUEID` of the returned set, and pass it back on the next call to fetch only the newer events.
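+
+A minimal polling sketch, assuming `jq` is available and using the `unique_id` field of each log entry:
+
+```sh
+# first call, without after=
+last_id=$(curl -s "http://NODE:19999/api/v1/alarm_log" | jq 'map(.unique_id) | max')
+
+# subsequent calls return only the events newer than the remembered id
+curl -s "http://NODE:19999/api/v1/alarm_log?after=${last_id}"
+```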
+
+### Alarm badges
+
+The following will return an SVG badge of the alarm named `NAME`, attached to the chart named `CHART`.
+
+`http://NODE:19999/api/v1/badge.svg?alarm=NAME&chart=CHART`
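+
+For example, to download the badge of a hypothetical alarm named `10min_cpu_usage` attached to the `system.cpu` chart:
+
+```sh
+curl -s "http://NODE:19999/api/v1/badge.svg?alarm=10min_cpu_usage&chart=system.cpu" -o alarm-badge.svg
+```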
+
+## Health Management API
+
+Netdata v1.12 and beyond provides a command API to control health checks and notifications at runtime. The feature is especially useful for maintenance periods, during which you would otherwise receive meaningless alarms.
+From Netdata v1.16.0 and beyond, the configuration controlled via the API commands is [persisted across Netdata restarts](#persistence).
+
+Specifically, the API allows you to:
+
+- Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log.
+- Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the Netdata UI will show the alarms as active, but no notifications will be sent.
+- Disable or silence specific alarms that match selectors on alarm/template name, chart, context, host and family.
+
+The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://NODE:19999/netdata.conf`:
+
+```
+[registry]
+ # netdata management api key file = /var/lib/netdata/netdata.api.key
+```
+
+You can access the API via GET requests by adding the token to an `X-Auth-Token` HTTP header, like this:
+
+```
+curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```
+
+By default, access to the health management API is only allowed from `localhost`. Accessing it from anywhere else returns a 403 error with the message `You are not allowed to access this resource.` You can change permissions by editing the `allow management from` variable in `netdata.conf` within the `[web]` section. See [web server access lists](/web/server/README.md#access-lists) for more information.
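+
+For example, to also allow a hypothetical management host at `10.0.0.2` to issue commands, you could set:
+
+```
+[web]
+    allow management from = localhost 10.0.0.2
+```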
+
+The command `RESET` just returns Netdata to the default operation, with all health checks and notifications enabled.
+If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`.
+
+### Disable or silence all alarms
+
+If all you need is to temporarily disable all health checks, issue the following before your maintenance period starts:
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken"
+```
+
+The effect of disabling health checks is that the alarm criteria are not evaluated at all and nothing is written in the alarm log.
+If you want the health checks to be running but to not receive any notifications during your maintenance period, you can instead use this:
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken"
+```
+
+Alarms may then still be raised and logged in Netdata, so you'll be able to see them via the UI.
+
+Regardless of the option you choose, at the end of your maintenance period revert to the normal state via the `RESET` command.
+
+```sh
+curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```
+
+### Disable or silence specific alarms
+
+If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used.
+Instead, the following commands expect that one or more alarm selectors will be added, so that only alarms that match the selectors are disabled or silenced.
+
+- `DISABLE` : Set the mode to disable health checks.
+- `SILENCE` : Set the mode to silence notifications.
+
+You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well.
+You will get a warning in the response if a selector was added without a SILENCE/DISABLE command, or vice versa.
+
+Each request can specify a single alarm `selector`, with one or more `selection criteria`.
+A single alarm will match a `selector` if all selection criteria match the alarm.
+You can add as many selectors as you like.
+In essence, the rule is: IF (alarm matches all the criteria in selector1 OR all the criteria in selector2 OR ...) THEN apply the DISABLE or SILENCE command.
+
+To clear all selectors and reset the mode to default, use the `RESET` command.
+
+The following example silences notifications for all the alarms with context=load:
+
+```
+curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken"
+```
+
+#### Selection criteria
+
+The `selection criteria` are key/value pairs, in the format `key : value`, where value is a Netdata [simple pattern](/libnetdata/simple_pattern/README.md). This means that you can create very powerful selectors (you will rarely need more than one or two).
+
+The accepted keys for the `selection criteria` are the following:
+
+- `alarm` : The expression provided will match both `alarm` and `template` names.
+- `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.
+- `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.
+- `hosts` : The hostnames that will need to match.
+- `families` : The alarm families.
+
+You can add any of the selection criteria you need to the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. For example, there is no reason to add `hosts: *` if you want the criteria to apply to alarms for all hosts.
+
+Example 1: Disable all health checks for context = `random`
+
+```
+http://NODE:19999/api/v1/manage/health?cmd=DISABLE&context=random
+```
+
+Example 2: Silence all alarms and templates with name starting with `out_of` on host `myhost`
+
+```
+http://NODE:19999/api/v1/manage/health?cmd=SILENCE&alarm=out_of*&hosts=myhost
+```
+
+Example 2.2: Add one more selector, to also silence alarms for cpu1 and cpu2
+
+```
+http://NODE:19999/api/v1/manage/health?families=cpu1 cpu2
+```
+
+### List silencers
+
+The command `LIST` was added in Netdata v1.16.0 and returns a JSON with the current status of the silencers.
+
+```
+curl "http://NODE:19999/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: Mytoken"
+```
+
+As an example, the following response shows that we have two silencers configured, one for an alarm called `samplealarm` and one for alarms with context `random` on host `myhost`:
+
+```json
+{
+ "all": false,
+ "type": "SILENCE",
+ "silencers": [
+ {
+ "alarm": "samplealarm"
+ },
+ {
+ "context": "random",
+ "hosts": "myhost"
+ }
+ ]
+}
+```
+
+The response below shows that we have disabled all health checks.
+
+```json
+{
+ "all": true,
+ "type": "DISABLE",
+ "silencers": []
+}
+```
+
+### Responses
+
+- "Auth Error" : Token authentication failed
+- "All alarm notifications are silenced" : Successful response to cmd=SILENCE ALL
+- "All health checks are disabled" : Successful response to cmd=DISABLE ALL
+- "All health checks and notifications are enabled" : Successful response to cmd=RESET
+- "Health checks disabled for alarms matching the selectors" : Added to the response for a cmd=DISABLE
+- "Alarm notifications silenced for alarms matching the selectors" : Added to the response for a cmd=SILENCE
+- "Alarm selector added" : Added to the response when a new selector is added
+- "Invalid key. Ignoring it." : Wrong name of a parameter. Added to the response and ignored.
+- "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." : Added to the response if a selector is added without a selector-specific command.
+- "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." : Added to the response if a selector-specific command is issued without a selector.
+
+### Persistence
+
+From Netdata v1.16.0 and beyond, the silencers configuration is persisted to disk and loaded when Netdata starts.
+The JSON string returned by the [LIST command](#list-silencers) is automatically saved to the `silencers file` every time a command alters the silencers configuration.
+The file's location is configurable in `netdata.conf`. The default is shown below:
+
+```
+[health]
+ # silencers file = /var/lib/netdata/health.silencers.json
+```
+
+### Further reading
+
+The test script under [tests/health_mgmtapi](/tests/health_mgmtapi/README.md) contains a series of tests that you can either run or read through to understand the various calls and responses better.
+
+
diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c
new file mode 100644
index 0000000..bad3e96
--- /dev/null
+++ b/web/api/health/health_cmdapi.c
@@ -0,0 +1,206 @@
+//
+// Created by Christopher on 11/12/18.
+//
+
+#include "health_cmdapi.h"
+
+/**
+ * Free Silencers
+ *
+ * Clean the silencer structure
+ *
+ * @param t is the structure that will be cleaned.
+ */
+void free_silencers(SILENCER *t) {
+ if (!t) return;
+ if (t->next) free_silencers(t->next);
+ debug(D_HEALTH, "HEALTH command API: Freeing silencer %s:%s:%s:%s:%s", t->alarms,
+ t->charts, t->contexts, t->hosts, t->families);
+ simple_pattern_free(t->alarms_pattern);
+ simple_pattern_free(t->charts_pattern);
+ simple_pattern_free(t->contexts_pattern);
+ simple_pattern_free(t->hosts_pattern);
+ simple_pattern_free(t->families_pattern);
+ freez(t->alarms);
+ freez(t->charts);
+ freez(t->contexts);
+ freez(t->hosts);
+ freez(t->families);
+ freez(t);
+ return;
+}
+
+/**
+ * Silencers to JSON Entry
+ *
+ * Fill the buffer with the other values given.
+ *
+ * @param wb a pointer to the output buffer
+ * @param var the json variable
+ * @param val the json value
+ * @param hasprev has it a previous value?
+ *
+ * @return
+ */
+int health_silencers2json_entry(BUFFER *wb, char* var, char* val, int hasprev) {
+ if (val) {
+ buffer_sprintf(wb, "%s\n\t\t\t\"%s\": \"%s\"", (hasprev)?",":"", var, val);
+ return 1;
+ } else {
+ return hasprev;
+ }
+}
+
+/**
+ * Silencer to JSON
+ *
+ * Write the silencer values using JSON format inside a buffer.
+ *
+ * @param wb is the buffer to write the silencers.
+ */
+void health_silencers2json(BUFFER *wb) {
+ buffer_sprintf(wb, "{\n\t\"all\": %s,"
+ "\n\t\"type\": \"%s\","
+ "\n\t\"silencers\": [",
+ (silencers->all_alarms)?"true":"false",
+ (silencers->stype == STYPE_NONE)?"None":((silencers->stype == STYPE_DISABLE_ALARMS)?"DISABLE":"SILENCE"));
+
+ SILENCER *silencer;
+ int i = 0, j = 0;
+ for(silencer = silencers->silencers; silencer ; silencer = silencer->next) {
+ if(likely(i)) buffer_strcat(wb, ",");
+ buffer_strcat(wb, "\n\t\t{");
+ j=health_silencers2json_entry(wb, HEALTH_ALARM_KEY, silencer->alarms, j);
+ j=health_silencers2json_entry(wb, HEALTH_CHART_KEY, silencer->charts, j);
+ j=health_silencers2json_entry(wb, HEALTH_CONTEXT_KEY, silencer->contexts, j);
+ j=health_silencers2json_entry(wb, HEALTH_HOST_KEY, silencer->hosts, j);
+ health_silencers2json_entry(wb, HEALTH_FAMILIES_KEY, silencer->families, j);
+ j=0;
+ buffer_strcat(wb, "\n\t\t}");
+ i++;
+ }
+ if(likely(i)) buffer_strcat(wb, "\n\t");
+ buffer_strcat(wb, "]\n}\n");
+}
+
+/**
+ * Silencer to FILE
+ *
+ * Write the silencer buffer to a file.
+ * @param wb
+ */
+void health_silencers2file(BUFFER *wb) {
+ if (wb->len == 0) return;
+
+ FILE *fd = fopen(silencers_filename, "wb");
+ if(fd) {
+ size_t written = (size_t)fprintf(fd, "%s", wb->buffer);
+ if (written == wb->len ) {
+ info("Silencer changes written to %s", silencers_filename);
+ }
+ fclose(fd);
+ return;
+ }
+ error("Silencer changes could not be written to %s. Error %s", silencers_filename, strerror(errno));
+}
+
+/**
+ * Request V1 MGMT Health
+ *
+ * Function called by api to management the health.
+ *
+ * @param host main structure with client information!
+ * @param w is the structure with all information of the client request.
+ * @param url is the url that netdata is working
+ *
+ * @return It returns 200 on success and another code otherwise.
+ */
+int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url) {
+ int ret;
+ (void) host;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_TEXT_PLAIN;
+
+ buffer_flush(w->response.data);
+
+ //Local instance of the silencer
+ SILENCER *silencer = NULL;
+ int config_changed = 1;
+
+ if (!w->auth_bearer_token) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR);
+ ret = HTTP_RESP_FORBIDDEN;
+ } else {
+ debug(D_HEALTH, "HEALTH command API: Comparing secret '%s' to '%s'", w->auth_bearer_token, api_secret);
+ if (strcmp(w->auth_bearer_token, api_secret)) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR);
+ ret = HTTP_RESP_FORBIDDEN;
+ } else {
+ while (url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *key = mystrsep(&value, "=");
+ if (!key || !*key) continue;
+ if (!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 health query param '%s' with value '%s'", w->id, key, value);
+
+ // name and value are now the parameters
+ if (!strcmp(key, "cmd")) {
+ if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCEALL)) {
+ silencers->all_alarms = 1;
+ silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCEALL);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLEALL)) {
+ silencers->all_alarms = 1;
+ silencers->stype = STYPE_DISABLE_ALARMS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLEALL);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCE)) {
+ silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCE);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLE)) {
+ silencers->stype = STYPE_DISABLE_ALARMS;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLE);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_RESET)) {
+ silencers->all_alarms = 0;
+ silencers->stype = STYPE_NONE;
+ free_silencers(silencers->silencers);
+ silencers->silencers = NULL;
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_RESET);
+ } else if (!strcmp(value, HEALTH_CMDAPI_CMD_LIST)) {
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_silencers2json(wb);
+ config_changed=0;
+ }
+ } else {
+ silencer = health_silencers_addparam(silencer, key, value);
+ }
+ }
+
+ if (likely(silencer)) {
+ health_silencers_add(silencer);
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_ADDED);
+ if (silencers->stype == STYPE_NONE) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_STYPEWARNING);
+ }
+ }
+ if (unlikely(silencers->stype != STYPE_NONE && !silencers->all_alarms && !silencers->silencers)) {
+ buffer_strcat(wb, HEALTH_CMDAPI_MSG_NOSELECTORWARNING);
+ }
+ ret = HTTP_RESP_OK;
+ }
+ }
+ w->response.data = wb;
+ buffer_no_cacheable(w->response.data);
+ if (ret == HTTP_RESP_OK && config_changed) {
+ BUFFER *jsonb = buffer_create(200);
+ health_silencers2json(jsonb);
+ health_silencers2file(jsonb);
+ buffer_free(jsonb);
+ }
+
+ return ret;
+}
diff --git a/web/api/health/health_cmdapi.h b/web/api/health/health_cmdapi.h
new file mode 100644
index 0000000..d5309c7
--- /dev/null
+++ b/web/api/health/health_cmdapi.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_HEALTH_SVG_H
+#define NETDATA_WEB_HEALTH_SVG_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web/server/web_client.h"
+#include "health/health.h"
+
+#define HEALTH_CMDAPI_CMD_SILENCEALL "SILENCE ALL"
+#define HEALTH_CMDAPI_CMD_DISABLEALL "DISABLE ALL"
+#define HEALTH_CMDAPI_CMD_SILENCE "SILENCE"
+#define HEALTH_CMDAPI_CMD_DISABLE "DISABLE"
+#define HEALTH_CMDAPI_CMD_RESET "RESET"
+#define HEALTH_CMDAPI_CMD_LIST "LIST"
+
+#define HEALTH_CMDAPI_MSG_AUTHERROR "Auth Error\n"
+#define HEALTH_CMDAPI_MSG_SILENCEALL "All alarm notifications are silenced\n"
+#define HEALTH_CMDAPI_MSG_DISABLEALL "All health checks are disabled\n"
+#define HEALTH_CMDAPI_MSG_RESET "All health checks and notifications are enabled\n"
+#define HEALTH_CMDAPI_MSG_DISABLE "Health checks disabled for alarms matching the selectors\n"
+#define HEALTH_CMDAPI_MSG_SILENCE "Alarm notifications silenced for alarms matching the selectors\n"
+#define HEALTH_CMDAPI_MSG_ADDED "Alarm selector added\n"
+#define HEALTH_CMDAPI_MSG_STYPEWARNING "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command.\n"
+#define HEALTH_CMDAPI_MSG_NOSELECTORWARNING "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.\n"
+
+int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url);
+
+#include "web/api/web_api_v1.h"
+
+#endif /* NETDATA_WEB_HEALTH_CMDAPI_H */
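
The commands defined in this header are driven through the /api/v1/manage/health endpoint documented in the swagger files below. A minimal client sketch using libcurl (an assumption — any HTTP client works); the agent address and the bearer token are placeholders, and the token must match the agent's configured api authorization token:

#include <stdio.h>
#include <curl/curl.h>

int main(void) {
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL *curl = curl_easy_init();
    if (!curl) return 1;

    // Placeholder token: replace with the agent's real API secret.
    struct curl_slist *headers = curl_slist_append(NULL,
        "Authorization: Bearer REPLACE_WITH_API_SECRET");

    // SILENCE notifications for alarms attached to the system.cpu chart.
    curl_easy_setopt(curl, CURLOPT_URL,
        "http://localhost:19999/api/v1/manage/health?cmd=SILENCE&chart=system.cpu");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

    // The plain-text response (e.g. HEALTH_CMDAPI_MSG_SILENCE) is written
    // to stdout by libcurl's default write callback.
    CURLcode rc = curl_easy_perform(curl);
    if (rc != CURLE_OK)
        fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));

    curl_slist_free_all(headers);
    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return rc == CURLE_OK ? 0 : 1;
}

On success the server replies HTTP 200 with one of the HEALTH_CMDAPI_MSG_* strings; a wrong token yields HTTP 403 with HEALTH_CMDAPI_MSG_AUTHERROR.
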
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
new file mode 100644
index 0000000..cb2b480
--- /dev/null
+++ b/web/api/netdata-swagger.json
@@ -0,0 +1,3288 @@
+{
+ "openapi": "3.0.0",
+ "info": {
+ "title": "Netdata API",
+ "description": "Real-time performance and health monitoring.",
+ "version": "1.33.1"
+ },
+ "paths": {
+ "/info": {
+ "get": {
+ "summary": "Get netdata basic information",
+ "description": "The info endpoint returns basic information about netdata. It provides:\n* netdata version\n* netdata unique id\n* list of hosts mirrored (includes itself)\n* Operating System, Virtualization, K8s nodes and Container technology information\n* List of active collector plugins and modules\n* Streaming information\n* number of alarms in the host\n * number of alarms in normal state\n * number of alarms in warning state\n * number of alarms in critical state\n",
+ "responses": {
+ "200": {
+ "description": "netdata basic information.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/info"
+ }
+ }
+ }
+ },
+ "503": {
+ "description": "netdata daemon not ready (used for health checks)."
+ }
+ }
+ }
+ },
+ "/charts": {
+ "get": {
+ "summary": "Get a list of all charts available at the server",
+ "description": "The charts endpoint returns a summary about all charts stored in the netdata server.",
+ "responses": {
+ "200": {
+ "description": "An array of charts.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/chart_summary"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/chart": {
+ "get": {
+ "summary": "Get info about a specific chart",
+ "description": "The chart endpoint returns detailed information about a chart.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with detailed information about the chart.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/chart"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "No chart id was supplied in the request."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ }
+ }
+ }
+ },
+ "/contexts": {
+ "get": {
+ "summary": "Get a list of all contexts available at the server",
+ "description": "The contexts endpoint returns a summary about all contexts stored in the netdata server.",
+ "parameters": [
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "full",
+ "all",
+ "charts",
+ "dimensions",
+ "labels",
+ "uuids",
+ "queue",
+ "flags",
+ "deleted",
+ "deepscan"
+ ]
+ },
+ "default": [
+ "full"
+ ]
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "limit the results on context having data after this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "limit the results on context having data before this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "chart_label_key",
+ "in": "query",
+ "description": "a simple pattern matching charts label keys (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "chart_labels_filter",
+ "in": "query",
+ "description": "a simple pattern matching charts label key and values (use colon for equality, comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "dimensions",
+ "in": "query",
+ "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An array of contexts.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/context_summary"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/context": {
+ "get": {
+ "summary": "Get info about a specific context",
+ "description": "The context endpoint returns detailed information about a given context.",
+ "parameters": [
+ {
+ "name": "context",
+ "in": "query",
+ "description": "The id of the context as returned by the /contexts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /contexts",
+ "default": "system.cpu"
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "full",
+ "all",
+ "charts",
+ "dimensions",
+ "labels",
+ "uuids",
+ "queue",
+ "flags",
+ "deleted",
+ "deepscan"
+ ]
+ },
+ "default": [
+ "full"
+ ]
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "limit the results on context having data after this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "limit the results on context having data before this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "chart_label_key",
+ "in": "query",
+ "description": "a simple pattern matching charts label keys (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "chart_labels_filter",
+ "in": "query",
+ "description": "a simple pattern matching charts label key and values (use colon for equality, comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "dimensions",
+ "in": "query",
+ "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with detailed information about the context.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/context"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "No context id was supplied in the request."
+ },
+ "404": {
+ "description": "No context with the given id is found."
+ }
+ }
+ }
+ },
+ "/alarm_variables": {
+ "get": {
+ "summary": "List variables available to configure alarms for a chart",
+ "description": "Returns the basic information of a chart and all the variables that can be used in alarm and template health configurations for the particular chart or family.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with information about the chart and the available variables.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarm_variables"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/data": {
+ "get": {
+ "summary": "Get collected data for a specific chart",
+ "description": "The data endpoint returns data stored in the round robin database of a chart.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call. Note chart or context must be specified",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ },
+ {
+ "name": "context",
+ "in": "query",
+ "description": "The context of the chart as returned by the /charts call. Note chart or context must be specified",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ },
+ {
+ "name": "dimension",
+ "in": "query",
+ "description": "Zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative, relative to parameter: before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). If not specified the default is -600 seconds. Netdata will adapt this parameter to the boundaries of the round robin database unless the allow_past option is specified.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -600
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is zero (i.e. the timestamp of the last value collected).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "points",
+ "in": "query",
+ "description": "The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the round robin database for this chart for the given duration, all the available collected values for the given duration will be returned.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 20
+ }
+ },
+ {
+ "name": "chart_label_key",
+ "in": "query",
+ "description": "Specify the chart label keys that need to match for context queries as comma separated values. At least one matching key is needed to match the corresponding chart.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "key1,key2,key3"
+ }
+ },
+ {
+ "name": "chart_labels_filter",
+ "in": "query",
+ "description": "Specify the chart label keys and values to match for context queries. All keys/values need to match for the chart to be included in the query. The labels are specified as key1:value1,key2:value2",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "key1:value1,key2:value2,key3:value3"
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "group_options",
+ "in": "query",
+ "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "gtime",
+ "in": "query",
+ "description": "The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "Specify a timeout value in milliseconds after which the agent will abort the query and return a 503 error. A value of 0 indicates no timeout.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the data to be returned.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "json",
+ "jsonp",
+ "csv",
+ "tsv",
+ "tsv-excel",
+ "ssv",
+ "ssvcomma",
+ "datatable",
+ "datasource",
+ "html",
+ "markdown",
+ "array",
+ "csvjsonarray"
+ ],
+ "default": "json"
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "nonzero",
+ "flip",
+ "jsonwrap",
+ "min2max",
+ "seconds",
+ "milliseconds",
+ "abs",
+ "absolute",
+ "absolute-sum",
+ "null2zero",
+ "objectrows",
+ "google_json",
+ "percentage",
+ "unaligned",
+ "match-ids",
+ "match-names",
+ "allow_past",
+ "anomaly-bit"
+ ]
+ },
+ "default": [
+ "seconds",
+ "jsonwrap"
+ ]
+ }
+ },
+ {
+ "name": "callback",
+ "in": "query",
+ "description": "For JSONP responses, the callback function name.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "filename",
+ "in": "query",
+ "description": "Add Content-Disposition: attachment; filename= header to the response, that will instruct the browser to save the response with the given filename.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "tqx",
+ "in": "query",
+ "description": "[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The call was successful. The response includes the data in the format requested. Swagger2.0 does not process the discriminator field to show polymorphism. The response will be one of the sub-types of the data-schema according to the chosen format, e.g. json -> data_json.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/data"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "Chart or context is not found. The supplied chart or context will be reported."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/badge.svg": {
+ "get": {
+ "summary": "Generate a badge in form of SVG image for a chart (or dimension)",
+ "description": "Successful responses are SVG images.",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ },
+ {
+ "name": "alarm",
+ "in": "query",
+ "description": "The name of an alarm linked to the chart.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "dimension",
+ "in": "query",
+ "description": "Zero, one or more dimension ids, as returned by the /chart call.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -600
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods are supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "abs",
+ "absolute",
+ "display-absolute",
+ "absolute-sum",
+ "null2zero",
+ "percentage",
+ "unaligned",
+ "anomaly-bit"
+ ]
+ },
+ "default": [
+ "absolute"
+ ]
+ }
+ },
+ {
+ "name": "label",
+ "in": "query",
+ "description": "A text to be used as the label.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "units",
+ "in": "query",
+ "description": "A text to be used as the units.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "label_color",
+ "in": "query",
+ "description": "A color to be used for the background of the label side(left side) of the badge. One of predefined colors or specific color in hex `RGB` or `RRGGBB` format (without preceding `#` character). If value wrong or not given default color will be used.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "green",
+ "brightgreen",
+ "yellow",
+ "yellowgreen",
+ "orange",
+ "red",
+ "blue",
+ "grey",
+ "gray",
+ "lightgrey",
+ "lightgray"
+ ]
+ },
+ {
+ "type": "string",
+ "format": "^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ }
+ ]
+ }
+ },
+ {
+ "name": "value_color",
+ "in": "query",
+ "description": "A color to be used for the background of the value *(right)* part of badge. You can set multiple using a pipe with a condition each, like this: `color<value|color:null` The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in same manner as for `label_color` parameter. Currently only integers are supported as values.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "text_color_lbl",
+ "in": "query",
+ "description": "Font color for label *(left)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "green",
+ "brightgreen",
+ "yellow",
+ "yellowgreen",
+ "orange",
+ "red",
+ "blue",
+ "grey",
+ "gray",
+ "lightgrey",
+ "lightgray"
+ ]
+ },
+ {
+ "type": "string",
+ "format": "^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ }
+ ]
+ }
+ },
+ {
+ "name": "text_color_val",
+ "in": "query",
+ "description": "Font color for value *(right)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "green",
+ "brightgreen",
+ "yellow",
+ "yellowgreen",
+ "orange",
+ "red",
+ "blue",
+ "grey",
+ "gray",
+ "lightgrey",
+ "lightgray"
+ ]
+ },
+ {
+ "type": "string",
+ "format": "^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ }
+ ]
+ }
+ },
+ {
+ "name": "multiply",
+ "in": "query",
+ "description": "Multiply the value with this number for rendering it at the image (integer value required).",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "divide",
+ "in": "query",
+ "description": "Divide the value with this number for rendering it at the image (integer value required).",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "scale",
+ "in": "query",
+ "description": "Set the scale of the badge (greater or equal to 100).",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "fixed_width_lbl",
+ "in": "query",
+ "description": "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's left side *(label/name)*. You must set this parameter together with `fixed_width_val` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "fixed_width_val",
+ "in": "query",
+ "description": "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's right side *(value)*. You must set this parameter together with `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The call was successful. The response should be an SVG image."
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/allmetrics": {
+ "get": {
+ "summary": "Get a value of all the metrics maintained by netdata",
+ "description": "The allmetrics endpoint returns the latest value of all charts and dimensions stored in the netdata server.",
+ "parameters": [
+ {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the response to be returned.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "shell",
+ "prometheus",
+ "prometheus_all_hosts",
+ "json"
+ ],
+ "default": "shell"
+ }
+ },
+ {
+ "name": "filter",
+ "in": "query",
+ "description": "Allows to filter charts out using simple patterns.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "variables",
+ "in": "query",
+ "description": "When enabled, netdata will expose various system configuration metrics.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ }
+ },
+ {
+ "name": "help",
+ "in": "query",
+ "description": "Enable or disable HELP lines in prometheus output.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ }
+ },
+ {
+ "name": "types",
+ "in": "query",
+ "description": "Enable or disable TYPE lines in prometheus output.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
+ }
+ },
+ {
+ "name": "timestamps",
+ "in": "query",
+ "description": "Enable or disable timestamps in prometheus output.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "names",
+ "in": "query",
+ "description": "When enabled netdata will report dimension names. When disabled netdata will report dimension IDs. The default is controlled in netdata.conf.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "oldunits",
+ "in": "query",
+ "description": "When enabled, netdata will show metric names for the default source=average as they appeared before 1.12, by using the legacy unit naming conventions.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "hideunits",
+ "in": "query",
+ "description": "When enabled, netdata will not include the units in the metric names, for the default source=average.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
+ }
+ },
+ {
+ "name": "server",
+ "in": "query",
+ "description": "Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "prefix",
+ "in": "query",
+ "description": "Prefix all prometheus metrics with this string.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "any text"
+ }
+ },
+ {
+ "name": "data",
+ "in": "query",
+ "description": "Select the prometheus response data source. There is a setting in netdata.conf for the default.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "as-collected",
+ "average",
+ "sum"
+ ],
+ "default": "average"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "All the metrics returned in the format requested."
+ },
+ "400": {
+ "description": "The format requested is not supported."
+ }
+ }
+ }
+ },
+ "/alarms": {
+ "get": {
+ "summary": "Get a list of active or raised alarms on the server",
+ "description": "The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned.",
+ "parameters": [
+ {
+ "name": "all",
+ "in": "query",
+ "description": "If passed, all enabled alarms are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ },
+ {
+ "name": "active",
+ "in": "query",
+ "description": "If passed, the raised alarms in state WARNING or CRITICAL are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An object containing general info and a linked list of alarms.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarms"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/alarms_values": {
+ "get": {
+ "summary": "Get a list of active or raised alarms on the server",
+ "description": "The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing '?all', all the enabled alarms are returned. This option output differs from `/alarms` in the number of variables delivered. This endpoint gives to user `id`, `value`, `last_updated` time, and alarm `status`.",
+ "parameters": [
+ {
+ "name": "all",
+ "in": "query",
+ "description": "If passed, all enabled alarms are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ },
+ {
+ "name": "active",
+ "in": "query",
+ "description": "If passed, the raised alarms in state WARNING or CRITICAL are returned.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "boolean"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An object containing general info and a linked list of alarms.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarms_values"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/alarm_log": {
+ "get": {
+ "summary": "Retrieves the entries of the alarm log",
+ "description": "Returns an array of alarm_log entries, with historical information on raised and cleared alarms.",
+ "parameters": [
+ {
+ "name": "after",
+ "in": "query",
+ "description": "Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and give it back to get incrementally the next events.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An array of alarm log entries.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/alarm_log_entry"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/alarm_count": {
+ "get": {
+ "summary": "Get an overall status of the chart",
+ "description": "Checks multiple charts with the same context and counts number of alarms with given status.",
+ "parameters": [
+ {
+ "in": "query",
+ "name": "context",
+ "description": "Specify context which should be checked.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [
+ "system.cpu"
+ ]
+ }
+ },
+ {
+ "in": "query",
+ "name": "status",
+ "description": "Specify alarm status to count.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "REMOVED",
+ "UNDEFINED",
+ "UNINITIALIZED",
+ "CLEAR",
+ "RAISED",
+ "WARNING",
+ "CRITICAL"
+ ],
+ "default": "RAISED"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An object containing a count of alarms with given status for given contexts.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/manage/health": {
+ "get": {
+ "summary": "Accesses the health management API to control health checks and notifications at runtime.",
+ "description": "Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or Disable/Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenarios, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.",
+ "parameters": [
+ {
+ "name": "cmd",
+ "in": "query",
+ "description": "DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors. LIST: Show active configuration.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "DISABLE ALL",
+ "SILENCE ALL",
+ "DISABLE",
+ "SILENCE",
+ "RESET",
+ "LIST"
+ ]
+ }
+ },
+ {
+ "name": "alarm",
+ "in": "query",
+ "description": "The expression provided will match both `alarm` and `template` names.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "context",
+ "in": "query",
+ "description": "Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "hosts",
+ "in": "query",
+ "description": "The hostnames that will need to match.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "families",
+ "in": "query",
+ "description": "The alarm families.",
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A plain text response based on the result of the command."
+ },
+ "403": {
+ "description": "Bearer authentication error."
+ }
+ }
+ }
+ },
+ "/aclk": {
+ "get": {
+ "summary": "Get information about current ACLK state",
+ "description": "ACLK endpoint returns detailed information about current state of ACLK (Agent to Cloud communication).",
+ "responses": {
+ "200": {
+ "description": "JSON object with ACLK information.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/aclk_state"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/metric_correlations": {
+ "get": {
+ "summary": "Analyze all the metrics to find their correlations",
+ "description": "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint. Given two time-windows (baseline, highlight), it goes through all the available metrics, querying both windows and tries to find how these two windows relate to each other. It supports multiple algorithms to do so. The result is a list of all metrics evaluated, weighted for 0.0 (the two windows are more different) to 1.0 (the two windows are similar). The algorithm adjusts automatically the baseline window to be a power of two multiple of the highlighted (1, 2, 4, 8, etc).",
+ "parameters": [
+ {
+ "name": "baseline_after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of baseline window, or a relative number of seconds (negative, relative to parameter baseline_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -300
+ }
+ },
+ {
+ "name": "baseline_before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the baseline window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of highlighted window, or a relative number of seconds (negative, relative to parameter highlight_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the highlighted window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "points",
+ "in": "query",
+ "description": "The number of points to be evaluated for the highlighted window. The baseline window will be adjusted automatically to receive a proportional amount of points.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 500
+ }
+ },
+ {
+ "name": "method",
+ "in": "query",
+ "description": "the algorithm to run",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "ks2",
+ "volume"
+ ],
+ "default": "ks2"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "Cancel the query if to takes more that this amount of milliseconds.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 60000
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "min2max",
+ "abs",
+ "absolute",
+ "absolute-sum",
+ "null2zero",
+ "percentage",
+ "unaligned",
+ "allow_past",
+ "nonzero",
+ "anomaly-bit",
+ "raw"
+ ]
+ },
+ "default": [
+ "null2zero",
+ "allow_past",
+ "nonzero",
+ "unaligned"
+ ]
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "group_options",
+ "in": "query",
+ "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/metric_correlations"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+ "description": "metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
+ },
+ "/function": {
+ "get": {
+ "summary": "Execute a collector function.",
+ "parameters": [
+ {
+ "name": "function",
+ "in": "query",
+ "description": "The name of the function, as returned by the collector.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "The timeout in seconds to wait for the function to complete.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 10
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The collector function has been executed successfully. Each collector may return a different type of content."
+ },
+ "400": {
+ "description": "The request was rejected by the collector."
+ },
+ "404": {
+ "description": "The requested function is not found."
+ },
+ "500": {
+ "description": "Other internal error, getting this error means there is a bug in Netdata."
+ },
+ "503": {
+ "description": "The collector to execute the function is not currently available."
+ },
+ "504": {
+ "description": "Timeout while waiting for the collector to execute the function."
+ },
+ "591": {
+ "description": "The collector sent a response, but it was invalid or corrupted."
+ }
+ }
+ }
+ },
+ "/functions": {
+ "get": {
+ "summary": "Get a list of all registered collector functions.",
+ "description": "Collector functions are programs that can be executed on demand.",
+ "responses": {
+ "200": {
+ "description": "A JSON object containing one object per supported function."
+ }
+ }
+ }
+ },
+ "/weights": {
+ "get": {
+ "summary": "Analyze all the metrics using an algorithm and score them accordingly",
+ "description": "This endpoint goes through all metrics and scores them according to an algorithm.",
+ "parameters": [
+ {
+ "name": "baseline_after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of baseline window, or a relative number of seconds (negative, relative to parameter baseline_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). This parameter is used in KS2 and VOLUME algorithms.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -300
+ }
+ },
+ {
+ "name": "baseline_before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the baseline window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). This parameter is used in KS2 and VOLUME algorithms.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of highlighted window, or a relative number of seconds (negative, relative to parameter highlight_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the highlighted window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "context",
+ "in": "query",
+ "description": "A simple pattern matching the contexts to evaluate.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "points",
+ "in": "query",
+ "description": "The number of points to be evaluated for the highlighted window. The baseline window will be adjusted automatically to receive a proportional amount of points. This parameter is only used by the KS2 algorithm.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 500
+ }
+ },
+ {
+ "name": "method",
+ "in": "query",
+ "description": "the algorithm to run",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "ks2",
+ "volume",
+ "anomaly-rate"
+ ],
+ "default": "anomaly-rate"
+ }
+ },
+ {
+ "name": "tier",
+ "in": "query",
+ "description": "Use the specified database tier",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "Cancel the query if to takes more that this amount of milliseconds.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 60000
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "min2max",
+ "abs",
+ "absolute",
+ "absolute-sum",
+ "null2zero",
+ "percentage",
+ "unaligned",
+ "nonzero",
+ "anomaly-bit",
+ "raw"
+ ]
+ },
+ "default": [
+ "null2zero",
+ "nonzero",
+ "unaligned"
+ ]
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "group_options",
+ "in": "query",
+ "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each context, chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/weights"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+ "description": "metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
+ }
+ },
+ "servers": [
+ {
+ "url": "https://registry.my-netdata.io/api/v1"
+ },
+ {
+ "url": "http://registry.my-netdata.io/api/v1"
+ }
+ ],
+ "components": {
+ "schemas": {
+ "info": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "string",
+ "description": "netdata version of the server.",
+ "example": "1.11.1_rolling"
+ },
+ "uid": {
+ "type": "string",
+ "description": "netdata unique id of the server.",
+ "example": "24e9fe3c-f2ac-11e8-bafc-0242ac110002"
+ },
+ "mirrored_hosts": {
+ "type": "array",
+ "description": "List of hosts mirrored of the server (include itself).",
+ "items": {
+ "type": "string"
+ },
+ "example": [
+ "host1.example.com",
+ "host2.example.com"
+ ]
+ },
+ "mirrored_hosts_status": {
+ "type": "array",
+ "description": "List of details of hosts mirrored to this served (including self). Indexes correspond to indexes in \"mirrored_hosts\".",
+ "items": {
+ "type": "object",
+ "description": "Host data",
+ "properties": {
+ "guid": {
+ "type": "string",
+ "format": "uuid",
+ "nullable": false,
+ "description": "Host unique GUID from `netdata.public.unique.id`.",
+ "example": "245e4bff-3b34-47c1-a6e5-5c535a9abfb2"
+ },
+ "reachable": {
+ "type": "boolean",
+ "nullable": false,
+ "description": "Current state of streaming. Always true for localhost/self."
+ },
+ "claim_id": {
+ "type": "string",
+ "format": "uuid",
+ "nullable": true,
+ "description": "Cloud GUID/identifier in case the host is claimed. If child status unknown or unclaimed this field is set to `null`",
+ "example": "c3b2a66a-3052-498c-ac52-7fe9e8cccb0c"
+ }
+ }
+ }
+ },
+ "os_name": {
+ "type": "string",
+ "description": "Operating System Name.",
+ "example": "Manjaro Linux"
+ },
+ "os_id": {
+ "type": "string",
+ "description": "Operating System ID.",
+ "example": "manjaro"
+ },
+ "os_id_like": {
+ "type": "string",
+ "description": "Known OS similar to this OS.",
+ "example": "arch"
+ },
+ "os_version": {
+ "type": "string",
+ "description": "Operating System Version.",
+ "example": "18.0.4"
+ },
+ "os_version_id": {
+ "type": "string",
+ "description": "Operating System Version ID.",
+ "example": "unknown"
+ },
+ "os_detection": {
+ "type": "string",
+ "description": "OS parameters detection method.",
+ "example": "Mixed"
+ },
+ "kernel_name": {
+ "type": "string",
+ "description": "Kernel Name.",
+ "example": "Linux"
+ },
+ "kernel_version": {
+ "type": "string",
+ "description": "Kernel Version.",
+ "example": "4.19.32-1-MANJARO"
+ },
+ "is_k8s_node": {
+ "type": "boolean",
+ "description": "Netdata is running on a K8s node.",
+ "example": false
+ },
+ "architecture": {
+ "type": "string",
+ "description": "Kernel architecture.",
+ "example": "x86_64"
+ },
+ "virtualization": {
+ "type": "string",
+ "description": "Virtualization Type.",
+ "example": "kvm"
+ },
+ "virt_detection": {
+ "type": "string",
+ "description": "Virtualization detection method.",
+ "example": "systemd-detect-virt"
+ },
+ "container": {
+ "type": "string",
+ "description": "Container technology.",
+ "example": "docker"
+ },
+ "container_detection": {
+ "type": "string",
+ "description": "Container technology detection method.",
+ "example": "dockerenv"
+ },
+ "stream_compression": {
+ "type": "boolean",
+ "description": "Stream transmission compression method.",
+ "example": true
+ },
+ "labels": {
+ "type": "object",
+ "description": "List of host labels.",
+ "properties": {
+ "app": {
+ "type": "string",
+ "description": "Host label.",
+ "example": "netdata"
+ }
+ }
+ },
+ "collectors": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "description": "Array of collector plugins and modules.",
+ "properties": {
+ "plugin": {
+ "type": "string",
+ "description": "Collector plugin.",
+ "example": "python.d.plugin"
+ },
+ "module": {
+ "type": "string",
+ "description": "Module of the collector plugin.",
+ "example": "dockerd"
+ }
+ }
+ }
+ },
+ "alarms": {
+ "type": "object",
+ "description": "Number of alarms in the server.",
+ "properties": {
+ "normal": {
+ "type": "integer",
+ "description": "Number of alarms in normal state."
+ },
+ "warning": {
+ "type": "integer",
+ "description": "Number of alarms in warning state."
+ },
+ "critical": {
+ "type": "integer",
+ "description": "Number of alarms in critical state."
+ }
+ }
+ }
+ }
+ },
+ "chart_summary": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "The hostname of the netdata server."
+ },
+ "version": {
+ "type": "string",
+ "description": "netdata version of the server."
+ },
+ "release_channel": {
+ "type": "string",
+ "description": "The release channel of the build on the server.",
+ "example": "nightly"
+ },
+ "timezone": {
+ "type": "string",
+ "description": "The current timezone on the server."
+ },
+ "os": {
+ "type": "string",
+ "description": "The netdata server host operating system.",
+ "enum": [
+ "macos",
+ "linux",
+ "freebsd"
+ ]
+ },
+ "history": {
+ "type": "number",
+ "description": "The duration, in seconds, of the round robin database maintained by netdata."
+ },
+ "memory_mode": {
+ "type": "string",
+ "description": "The name of the database memory mode on the server."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The default update frequency of the netdata server. All charts have an update frequency equal or bigger than this."
+ },
+ "charts": {
+ "type": "object",
+ "description": "An object containing all the chart objects available at the netdata server. This is used as an indexed array. The key of each chart object is the id of the chart.",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart"
+ }
+ },
+ "charts_count": {
+ "type": "number",
+ "description": "The number of charts."
+ },
+ "dimensions_count": {
+ "type": "number",
+ "description": "The total number of dimensions."
+ },
+ "alarms_count": {
+ "type": "number",
+ "description": "The number of alarms."
+ },
+ "rrd_memory_bytes": {
+ "type": "number",
+ "description": "The size of the round robin database in bytes."
+ }
+ }
+ },
+ "chart": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The unique id of the chart."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the chart."
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of the chart. Types are not handled by netdata. You can use this field for anything you like."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the chart. Families are not handled by netdata. You can use this field for anything you like."
+ },
+ "title": {
+ "type": "string",
+ "description": "The title of the chart."
+ },
+ "priority": {
+ "type": "number",
+ "description": "The relative priority of the chart. Netdata does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family."
+ },
+ "enabled": {
+ "type": "boolean",
+ "description": "True when the chart is enabled. Disabled charts do not currently collect values, but they may have historical values available."
+ },
+ "units": {
+ "type": "string",
+ "description": "The unit of measurement for the values of all dimensions of the chart."
+ },
+ "data_url": {
+ "type": "string",
+ "description": "The absolute path to get data values for this chart. You are expected to use this path as the base when constructing the URL to fetch data values for this chart."
+ },
+ "chart_type": {
+ "type": "string",
+ "description": "The chart type.",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ]
+ },
+ "duration": {
+ "type": "number",
+ "description": "The duration, in seconds, of the round robin database maintained by netdata."
+ },
+ "first_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the round robin database."
+ },
+ "last_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the round robin database."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database."
+ },
+ "dimensions": {
+ "type": "object",
+ "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. For each pair in the dictionary: the key is the id of the dimension and the value is a dictionary containing the name.",
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the dimension"
+ }
+ }
+ }
+ },
+ "chart_variables": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart_variables"
+ }
+ },
+ "green": {
+ "type": "number",
+ "nullable": true,
+ "description": "Chart health green threshold."
+ },
+ "red": {
+ "type": "number",
+ "nullable": true,
+ "description": "Chart health red threshold."
+ }
+ }
+ },
+ "context_summary": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "The hostname of the netdata server."
+ },
+ "machine_guid": {
+ "type": "string",
+ "description": "The unique installation id of this netdata server."
+ },
+ "node_id": {
+ "type": "string",
+            "description": "The unique node id of this netdata server at the hub."
+ },
+ "claim_id": {
+ "type": "string",
+            "description": "The unique handshake id between this netdata server and the hub."
+ },
+ "host_labels": {
+ "type": "object",
+ "description": "The host labels associated with this netdata server."
+ },
+ "context": {
+ "type": "object",
+ "description": "An object containing all the context objects available at the netdata server. This is used as an indexed array. The key of each context object is the id of the context.",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/context"
+ }
+ }
+ }
+ },
+ "context": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "string",
+            "description": "The version of this context. Version numbers are not sequential, but a higher number indicates a newer object."
+ },
+ "hub_version": {
+ "type": "string",
+            "description": "The version of this context, as known by the hub."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the context. When multiple charts of a context have different families, the netdata server replaces the different parts with [x], so that the context can have only one family."
+ },
+ "title": {
+ "type": "string",
+ "description": "The title of the context. When multiple charts of a context have different titles, the netdata server replaces the different parts with [x], so that the context can have only one title."
+ },
+ "priority": {
+ "type": "number",
+            "description": "The relative priority of the context. When multiple charts of a context have different priorities, the minimum among them is selected as the priority of the context."
+ },
+ "units": {
+ "type": "string",
+            "description": "The unit of measurement for the values of all dimensions of the context. If multiple charts of the context have different units, the latest collected unit is selected."
+ },
+ "chart_type": {
+ "type": "string",
+ "description": "The chart type.",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ]
+ },
+ "first_time_t": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the database."
+ },
+ "last_time_t": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the database."
+ },
+ "charts": {
+ "type": "object",
+            "description": "An object containing all the charts available for the context. This is used as an indexed array. For each pair in the dictionary, the key is the id of the chart and the value provides all details about the chart."
+ }
+ }
+ },
+ "alarm_variables": {
+ "type": "object",
+ "properties": {
+ "chart": {
+ "type": "string",
+ "description": "The unique id of the chart."
+ },
+ "chart_name": {
+ "type": "string",
+ "description": "The name of the chart."
+ },
+          "chart_context": {
+ "type": "string",
+ "description": "The context of the chart. It is shared across multiple monitored software or hardware instances and used in alarm templates."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the chart."
+ },
+ "host": {
+ "type": "string",
+ "description": "The host containing the chart."
+ },
+ "chart_variables": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart_variables"
+ }
+ },
+ "family_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ },
+ "host_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ }
+ }
+ },
+ "chart_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ },
+ "data": {
+ "type": "object",
+ "discriminator": {
+ "propertyName": "format"
+ },
+        "description": "Response will contain the appropriate subtype, e.g. data_json, depending on the requested format.",
+ "properties": {
+ "api": {
+ "type": "number",
+ "description": "The API version this conforms to, currently 1."
+ },
+ "id": {
+ "type": "string",
+ "description": "The unique id of the chart."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the chart."
+ },
+ "update_every": {
+ "type": "number",
+ "description": "The update frequency of this chart, in seconds. One value every this amount of time is kept in the round robin database (independently of the current view)."
+ },
+ "view_update_every": {
+ "type": "number",
+            "description": "The update frequency appropriate to the current view of this chart, in seconds. There is no point in requesting chart refreshes, using the same settings, more frequently than this."
+ },
+ "first_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the round robin database (independently of the current view)."
+ },
+ "last_entry": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the round robin database (independently of the current view)."
+ },
+ "after": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) returned in this response."
+ },
+ "before": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry returned in this response."
+ },
+ "min": {
+ "type": "number",
+ "description": "The minimum value returned in the current view. This can be used to size the y-series of the chart."
+ },
+ "max": {
+ "type": "number",
+ "description": "The maximum value returned in the current view. This can be used to size the y-series of the chart."
+ },
+ "dimension_names": {
+ "description": "The dimension names of the chart as returned in the current view.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "dimension_ids": {
+ "description": "The dimension IDs of the chart as returned in the current view.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "latest_values": {
+ "description": "The latest values collected for the chart (independently of the current view).",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "view_latest_values": {
+ "description": "The latest values returned with this response.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "dimensions": {
+ "type": "number",
+ "description": "The number of dimensions returned."
+ },
+ "points": {
+ "type": "number",
+ "description": "The number of rows / points returned."
+ },
+ "format": {
+ "type": "string",
+ "description": "The format of the result returned."
+ },
+ "chart_variables": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/chart_variables"
+ }
+ }
+ }
+ },
+ "data_json": {
+ "description": "Data response in json format.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "object",
+ "properties": {
+ "labels": {
+ "description": "The dimensions retrieved from the chart.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "data": {
+ "description": "The data requested, one element per sample with each element containing the values of the dimensions described in the labels value.",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ },
+ "description": "The result requested, in the format requested."
+ }
+ }
+ }
+ ]
+ },
+ "data_flat": {
+ "description": "Data response in csv / tsv / tsv-excel / ssv / ssv-comma / markdown / html formats.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ },
+ "data_array": {
+ "description": "Data response in array format.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ ]
+ },
+ "data_csvjsonarray": {
+ "description": "Data response in csvjsonarray format.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "description": "The first inner array contains strings showing the labels of each column, each subsequent array contains the values for each point in time.",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {}
+ }
+ }
+ }
+ }
+ ]
+ },
+ "data_datatable": {
+ "description": "Data response in datatable / datasource formats (suitable for Google Charts).",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/data"
+ },
+ {
+ "properties": {
+ "result": {
+ "type": "object",
+ "properties": {
+ "cols": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "description": "Always empty - for future use."
+ },
+ "label": {
+ "description": "The dimension returned from the chart."
+ },
+ "pattern": {
+ "description": "Always empty - for future use."
+ },
+ "type": {
+ "description": "The type of data in the column / chart-dimension."
+ },
+ "p": {
+ "description": "Contains any annotations for the column."
+ }
+ },
+ "required": [
+ "id",
+ "label",
+ "pattern",
+ "type"
+ ]
+ }
+ },
+ "rows": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "c": {
+ "type": "array",
+ "items": {
+ "properties": {
+ "v": {
+ "description": "Each value in the row is represented by an object named `c` with five v fields: data, null, null, 0, the value. This format is fixed by the Google Charts API."
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "alarms": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "latest_alarm_log_unique_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "status": {
+ "type": "boolean"
+ },
+ "now": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "alarms": {
+ "type": "object",
+ "properties": {
+ "chart-name.alarm-name": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "name": {
+ "type": "string",
+ "description": "Full alarm name."
+ },
+ "chart": {
+ "type": "string"
+ },
+ "family": {
+ "type": "string"
+ },
+ "active": {
+ "type": "boolean",
+ "description": "Will be false only if the alarm is disabled in the configuration."
+ },
+ "disabled": {
+ "type": "boolean",
+ "description": "Whether the health check for this alarm has been disabled via a health command API DISABLE command."
+ },
+ "silenced": {
+ "type": "boolean",
+ "description": "Whether notifications for this alarm have been silenced via a health command API SILENCE command."
+ },
+ "exec": {
+ "type": "string"
+ },
+ "recipient": {
+ "type": "string"
+ },
+ "source": {
+ "type": "string"
+ },
+ "units": {
+ "type": "string"
+ },
+ "info": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "last_status_change": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "last_updated": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "next_update": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "update_every": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_up_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_down_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_max_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_multiplier": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_up_to_timestamp": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "value_string": {
+ "type": "string"
+ },
+ "no_clear_notification": {
+ "type": "boolean"
+ },
+ "lookup_dimensions": {
+ "type": "string"
+ },
+ "db_after": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "db_before": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "lookup_method": {
+ "type": "string"
+ },
+ "lookup_after": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "lookup_before": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "lookup_options": {
+ "type": "string"
+ },
+ "calc": {
+ "type": "string"
+ },
+ "calc_parsed": {
+ "type": "string"
+ },
+ "warn": {
+ "type": "string"
+ },
+ "warn_parsed": {
+ "type": "string"
+ },
+ "crit": {
+ "type": "string"
+ },
+ "crit_parsed": {
+ "type": "string"
+ },
+ "warn_repeat_every": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "crit_repeat_every": {
+ "type": "integer",
+ "format": "int32"
+ },
+                "green": {
+                  "type": "string",
+                  "nullable": true
+                },
+                "red": {
+                  "type": "string",
+                  "nullable": true
+                },
+ "value": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "alarm_log_entry": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "unique_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "alarm_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "alarm_event_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "name": {
+ "type": "string"
+ },
+ "chart": {
+ "type": "string"
+ },
+ "family": {
+ "type": "string"
+ },
+ "processed": {
+ "type": "boolean"
+ },
+ "updated": {
+ "type": "boolean"
+ },
+ "exec_run": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "exec_failed": {
+ "type": "boolean"
+ },
+ "exec": {
+ "type": "string"
+ },
+ "recipient": {
+ "type": "string"
+ },
+ "exec_code": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "source": {
+ "type": "string"
+ },
+ "units": {
+ "type": "string"
+ },
+ "when": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "non_clear_duration": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "status": {
+ "type": "string"
+ },
+ "old_status": {
+ "type": "string"
+ },
+ "delay": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "delay_up_to_timestamp": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "updated_by_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "updates_id": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "value_string": {
+ "type": "string"
+ },
+ "old_value_string": {
+ "type": "string"
+ },
+ "silenced": {
+ "type": "string"
+ },
+ "info": {
+ "type": "string"
+ },
+ "value": {
+ "type": "number",
+ "nullable": true
+ },
+ "old_value": {
+ "type": "number",
+ "nullable": true
+ }
+ }
+ },
+ "alarms_values": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "alarms": {
+ "type": "object",
+          "description": "Map of alarm objects, keyed by alarm name.",
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer"
+ },
+ "value": {
+ "type": "integer"
+ },
+ "last_updated": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "status": {
+ "type": "string",
+ "enum": [
+ "REMOVED",
+ "UNDEFINED",
+ "UNINITIALIZED",
+ "CLEAR",
+ "RAISED",
+ "WARNING",
+ "CRITICAL",
+ "UNKNOWN"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "aclk_state": {
+ "type": "object",
+ "properties": {
+ "aclk-available": {
+ "type": "string",
+            "description": "Describes whether this agent is capable of connecting to the Cloud. False means the agent was built without the ACLK component, either on purpose (user choice) or due to a missing dependency."
+ },
+ "aclk-version": {
+ "type": "integer",
+ "description": "Describes which ACLK version is currently used."
+ },
+ "protocols-supported": {
+ "type": "array",
+            "description": "List of supported protocols for communication with the Cloud.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "agent-claimed": {
+ "type": "boolean",
+            "description": "Indicates whether this agent has been claimed to a space in the Cloud (the user has to perform claiming). If false (the user did not perform claiming), the agent will never attempt any Cloud connection."
+ },
+ "claimed_id": {
+ "type": "string",
+ "format": "uuid",
+            "description": "Unique ID this agent uses to identify itself when connecting to the Cloud."
+ },
+ "online": {
+ "type": "boolean",
+            "description": "Indicates whether this agent was connected to the Cloud at the time this request was processed."
+ },
+ "used-cloud-protocol": {
+ "type": "string",
+            "description": "The protocol used to communicate with the Cloud.",
+ "enum": [
+ "Old",
+ "New"
+ ]
+ }
+ }
+ },
+ "metric_correlations": {
+ "type": "object",
+ "properties": {
+ "after": {
+ "description": "the start time of the highlighted window",
+ "type": "integer"
+ },
+ "before": {
+ "description": "the end time of the highlighted window",
+ "type": "integer"
+ },
+ "duration": {
+ "description": "the duration of the highlighted window",
+ "type": "integer"
+ },
+ "points": {
+ "description": "the points of the highlighted window",
+ "type": "integer"
+ },
+ "baseline_after": {
+ "description": "the start time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_before": {
+ "description": "the end time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_duration": {
+ "description": "the duration of the baseline window",
+ "type": "integer"
+ },
+ "baseline_points": {
+ "description": "the points of the baseline window",
+ "type": "integer"
+ },
+ "group": {
+ "description": "the grouping method across time",
+ "type": "string"
+ },
+ "method": {
+ "description": "the correlation method used",
+ "type": "string"
+ },
+ "options": {
+ "description": "a comma separated list of the query options set",
+ "type": "string"
+ },
+ "correlated_dimensions": {
+          "description": "the number of dimensions returned in the result",
+          "type": "integer"
+ },
+ "total_dimensions_count": {
+ "description": "the total number of dimensions evaluated",
+ "type": "integer"
+ },
+ "statistics": {
+ "type": "object",
+ "properties": {
+ "query_time_ms": {
+ "type": "number"
+ },
+ "db_queries": {
+ "type": "integer"
+ },
+ "db_points_read": {
+ "type": "integer"
+ },
+ "query_result_points": {
+ "type": "integer"
+ },
+ "binary_searches": {
+ "type": "integer"
+ }
+ }
+ },
+ "correlated_charts": {
+ "type": "object",
+ "description": "An object containing chart objects with their metrics correlations.",
+ "properties": {
+ "chart-id1": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "type": "string"
+ },
+ "dimensions": {
+ "type": "object",
+ "properties": {
+ "dimension1-name": {
+ "type": "number"
+ },
+ "dimension2-name": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ },
+ "chart-id2": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "type": "string"
+ },
+ "dimensions": {
+ "type": "object",
+ "properties": {
+ "dimension1-name": {
+ "type": "number"
+ },
+ "dimension2-name": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "weights": {
+ "type": "object",
+ "properties": {
+ "after": {
+ "description": "the start time of the highlighted window",
+ "type": "integer"
+ },
+ "before": {
+ "description": "the end time of the highlighted window",
+ "type": "integer"
+ },
+ "duration": {
+ "description": "the duration of the highlighted window",
+ "type": "integer"
+ },
+ "points": {
+ "description": "the points of the highlighted window",
+ "type": "integer"
+ },
+ "baseline_after": {
+ "description": "the start time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_before": {
+ "description": "the end time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_duration": {
+ "description": "the duration of the baseline window",
+ "type": "integer"
+ },
+ "baseline_points": {
+ "description": "the points of the baseline window",
+ "type": "integer"
+ },
+ "group": {
+ "description": "the grouping method across time",
+ "type": "string"
+ },
+ "method": {
+ "description": "the correlation method used",
+ "type": "string"
+ },
+ "options": {
+ "description": "a comma separated list of the query options set",
+ "type": "string"
+ },
+ "correlated_dimensions": {
+          "description": "the number of dimensions returned in the result",
+          "type": "integer"
+ },
+ "total_dimensions_count": {
+ "description": "the total number of dimensions evaluated",
+ "type": "integer"
+ },
+ "statistics": {
+ "type": "object",
+ "properties": {
+ "query_time_ms": {
+ "type": "number"
+ },
+ "db_queries": {
+ "type": "integer"
+ },
+ "db_points_read": {
+ "type": "integer"
+ },
+ "query_result_points": {
+ "type": "integer"
+ },
+ "binary_searches": {
+ "type": "integer"
+ }
+ }
+ },
+ "contexts": {
+ "description": "A dictionary of weighted context objects.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/weighted_context"
+ }
+ }
+ }
+ },
+ "weighted_context": {
+ "type": "object",
+ "properties": {
+ "weight": {
+ "description": "The average weight of the context.",
+ "type": "number"
+ },
+ "charts": {
+ "description": "A dictionary of weighted chart objects.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/weighted_chart"
+ }
+ }
+ }
+ },
+ "weighted_chart": {
+ "type": "object",
+ "properties": {
+ "weight": {
+          "description": "The average weight of the chart.",
+ "type": "number"
+ },
+ "dimensions": {
+ "description": "A dictionary of weighted dimensions.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/weighted_dimension"
+ }
+ }
+ }
+ },
+ "weighted_dimension": {
+ "type": "number"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
new file mode 100644
index 0000000..fced654
--- /dev/null
+++ b/web/api/netdata-swagger.yaml
@@ -0,0 +1,2614 @@
+openapi: 3.0.0
+info:
+ title: Netdata API
+ description: Real-time performance and health monitoring.
+ version: 1.33.1
+paths:
+ /info:
+ get:
+ summary: Get netdata basic information
+      description: |
+        The info endpoint returns basic information about netdata. It provides:
+        * netdata version
+        * netdata unique id
+        * list of hosts mirrored (includes itself)
+        * operating system, virtualization, K8s nodes and container technology information
+        * list of active collector plugins and modules
+        * streaming information
+        * number of alarms in the host
+        * number of alarms in normal state
+        * number of alarms in warning state
+        * number of alarms in critical state
+ responses:
+ "200":
+ description: netdata basic information.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/info"
+ "503":
+ description: netdata daemon not ready (used for health checks).
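+    # A minimal usage sketch (an assumption for illustration: a local agent on
+    # the default port 19999, serving the API under /api/v1):
+    #   curl 'http://localhost:19999/api/v1/info'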
+ /charts:
+ get:
+ summary: Get a list of all charts available at the server
+ description: The charts endpoint returns a summary about all charts stored in the
+ netdata server.
+ responses:
+ "200":
+ description: An array of charts.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/chart_summary"
+ /chart:
+ get:
+ summary: Get info about a specific chart
+ description: The chart endpoint returns detailed information about a chart.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ responses:
+ "200":
+          description: A JSON object with detailed information about the chart.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/chart"
+ "400":
+ description: No chart id was supplied in the request.
+ "404":
+ description: No chart with the given id is found.
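+    # A minimal usage sketch for this endpoint (hypothetical local agent;
+    # system.cpu is the documented default chart id):
+    #   curl 'http://localhost:19999/api/v1/chart?chart=system.cpu'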
+ /contexts:
+ get:
+ summary: Get a list of all contexts available at the server
+ description: The contexts endpoint returns a summary about all contexts stored in the
+ netdata server.
+ parameters:
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - full
+ - all
+ - charts
+ - dimensions
+ - labels
+ - uuids
+ - queue
+ - flags
+ - deleted
+ - deepscan
+ default:
+ - full
+ - name: after
+ in: query
+        description: Limit the results to contexts having data after this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: before
+ in: query
+        description: Limit the results to contexts having data before this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: chart_label_key
+ in: query
+        description: A simple pattern matching chart label keys (use comma or pipe as separator).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: chart_labels_filter
+ in: query
+        description: "A simple pattern matching chart label keys and values (use colon for equality, comma or pipe
+          as separator)."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: dimensions
+ in: query
+        description: A simple pattern matching dimensions (use comma or pipe as separator).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: An array of contexts.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/context_summary"
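+    # A hedged sketch combining the filters documented above (hypothetical
+    # local agent; app:netdata reuses the host label example from the schemas):
+    #   curl 'http://localhost:19999/api/v1/contexts?options=full&chart_labels_filter=app:netdata'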
+ /context:
+ get:
+ summary: Get info about a specific context
+ description: The context endpoint returns detailed information about a given context.
+ parameters:
+ - name: context
+ in: query
+ description: The id of the context as returned by the /contexts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /contexts
+ default: system.cpu
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - full
+ - all
+ - charts
+ - dimensions
+ - labels
+ - uuids
+ - queue
+ - flags
+ - deleted
+ - deepscan
+ default:
+ - full
+ - name: after
+ in: query
+        description: Limit the results to contexts having data after this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: before
+ in: query
+        description: Limit the results to contexts having data before this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: chart_label_key
+ in: query
+        description: A simple pattern matching chart label keys (use comma or pipe as separator).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: chart_labels_filter
+ in: query
+        description: "A simple pattern matching chart label keys and values (use colon for equality, comma or pipe
+          as separator)."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: dimensions
+ in: query
+        description: A simple pattern matching dimensions (use comma or pipe as separator).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ responses:
+ "200":
+          description: A JSON object with detailed information about the context.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/context"
+ "400":
+ description: No context id was supplied in the request.
+ "404":
+ description: No context with the given id is found.
+ /alarm_variables:
+ get:
+ summary: List variables available to configure alarms for a chart
+ description: Returns the basic information of a chart and all the variables that can
+ be used in alarm and template health configurations for the particular
+ chart or family.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ responses:
+ "200":
+          description: A JSON object with information about the chart and the
+ available variables.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarm_variables"
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: No chart with the given id is found.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
+ /data:
+ get:
+ summary: Get collected data for a specific chart
+ description: The data endpoint returns data stored in the round robin database of a
+ chart.
+ parameters:
+ - name: chart
+ in: query
+        description: The id of the chart as returned by the /charts call. Note that either chart or context must be specified.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ - name: context
+ in: query
+        description: The context of the chart as returned by the /charts call. Note that either chart or context must be specified.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ - name: dimension
+ in: query
+ description: Zero, one or more dimension ids or names, as returned by the /chart
+ call, separated with comma or pipe. Netdata simple patterns are
+ supported.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ format: as returned by /charts
+ - name: after
+ in: query
+ description: "This parameter can either be an absolute timestamp specifying the
+ starting point of the data to be returned, or a relative number of
+ seconds (negative, relative to parameter: before). Netdata will
+          assume it is a relative number if it is less than 3 years (in seconds).
+ If not specified the default is -600 seconds. Netdata will adapt this
+ parameter to the boundaries of the round robin database unless the allow_past
+ option is specified."
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -600
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the data to be returned, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds). Netdata will adapt this parameter to the
+ boundaries of the round robin database. The default is zero (i.e.
+ the timestamp of the last value collected).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: points
+ in: query
+ description: The number of points to be returned. If not given, or it is <= 0, or
+          it is greater than the number of points stored in the round robin database for
+ this chart for the given duration, all the available collected
+ values for the given duration will be returned.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 20
+ - name: chart_label_key
+ in: query
+ description: Specify the chart label keys that need to match for context queries as comma separated values.
+ At least one matching key is needed to match the corresponding chart.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: key1,key2,key3
+ - name: chart_labels_filter
+ in: query
+ description: Specify the chart label keys and values to match for context queries. All keys/values need to
+ match for the chart to be included in the query. The labels are specified as key1:value1,key2:value2
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: key1:value1,key2:value2,key3:value3
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+          in order to return fewer points, this parameter defines the method
+          of grouping. Supported methods include "min", "max", "average", "sum",
+          "incremental-sum". "max" is actually calculated on the absolute
+          value collected (so it works for both positive and negative
+          dimensions, returning the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ - name: group_options
+ in: query
+ description: When the group function supports additional parameters, this field
+ can be used to pass them to it. Currently only "countif" supports this.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ - name: gtime
+ in: query
+ description: The grouping number of seconds. This is used in conjunction with
+          group=average to change the units of the metrics (i.e. when the data is
+          per-second, setting gtime=60 will turn it into per-minute).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: timeout
+ in: query
+ description: Specify a timeout value in milliseconds after which the agent will
+ abort the query and return a 503 error. A value of 0 indicates no timeout.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: format
+ in: query
+ description: The format of the data to be returned.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - json
+ - jsonp
+ - csv
+ - tsv
+ - tsv-excel
+ - ssv
+ - ssvcomma
+ - datatable
+ - datasource
+ - html
+ - markdown
+ - array
+ - csvjsonarray
+ default: json
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - nonzero
+ - flip
+ - jsonwrap
+ - min2max
+ - seconds
+ - milliseconds
+ - abs
+ - absolute
+ - absolute-sum
+ - null2zero
+ - objectrows
+ - google_json
+ - percentage
+ - unaligned
+ - match-ids
+ - match-names
+ - allow_past
+ - anomaly-bit
+ default:
+ - seconds
+ - jsonwrap
+ - name: callback
+ in: query
+ description: For JSONP responses, the callback function name.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: filename
+ in: query
+        description: "Add Content-Disposition: attachment; filename= header to
+          the response, which will instruct the browser to save the response
+          with the given filename."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: tqx
+ in: query
+ description: "[Google Visualization
+ API](https://developers.google.com/chart/interactive/docs/dev/imple\
+ menting_data_source?hl=en) formatted parameter."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: The call was successful. The response includes the data in the
+ format requested. Swagger2.0 does not process the discriminator
+ field to show polymorphism. The response will be one of the
+ sub-types of the data-schema according to the chosen format, e.g.
+ json -> data_json.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/data"
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: Chart or context is not found. The supplied chart or context will be reported.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
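+    # A sketch of a typical query built from the documented defaults
+    # (hypothetical local agent): after=-600 asks for the last 10 minutes,
+    # since small negative values are treated as relative seconds, and the
+    # result is reduced to 20 points by averaging:
+    #   curl 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-600&points=20&group=average&format=json&options=jsonwrap'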
+ /badge.svg:
+ get:
+ summary: Generate a badge in form of SVG image for a chart (or dimension)
+ description: Successful responses are SVG images.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ - name: alarm
+ in: query
+ description: The name of an alarm linked to the chart.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: dimension
+ in: query
+ description: Zero, one or more dimension ids, as returned by the /chart call.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ format: as returned by /charts
+ - name: after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the data to be returned, or a relative number of
+          seconds, relative to the last collected timestamp. Netdata will assume it is
+          a relative number if it is smaller than the duration of the round
+          robin database for this chart. So, if the round robin database is
+          3600 seconds, any value from -3600 to 3600 will trigger relative
+          arithmetic. Netdata will adapt this parameter to the boundaries of
+ the round robin database.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -600
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the data to be returned, or a relative number of
+          seconds, relative to the last collected timestamp. Netdata will assume it is
+          a relative number if it is smaller than the duration of the round
+          robin database for this chart. So, if the round robin database is
+          3600 seconds, any value from -3600 to 3600 will trigger relative
+          arithmetic. Netdata will adapt this parameter to the boundaries of
+ the round robin database.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+          in order to return fewer points, this parameter defines the method
+          of grouping. Supported methods include "min", "max", "average", "sum",
+          "incremental-sum". "max" is actually calculated on the absolute
+          value collected (so it works for both positive and negative
+          dimensions, returning the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - abs
+ - absolute
+ - display-absolute
+ - absolute-sum
+ - null2zero
+ - percentage
+ - unaligned
+ - anomaly-bit
+ default:
+ - absolute
+ - name: label
+ in: query
+ description: A text to be used as the label.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: units
+ in: query
+ description: A text to be used as the units.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: label_color
+ in: query
+        description: "A color to be used for the background of the label *(left)* side of the badge.
+          One of the predefined colors, or a specific color in hex `RGB` or `RRGGBB` format (without the preceding `#` character).
+          If the value is wrong or not given, the default color will be used."
+ required: false
+ allowEmptyValue: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - green
+ - brightgreen
+ - yellow
+ - yellowgreen
+ - orange
+ - red
+ - blue
+ - grey
+ - gray
+ - lightgrey
+ - lightgray
+ - type: string
+ format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+ - name: value_color
+ in: query
+        description: "A color to be used for the background of the value *(right)* part of the badge. You can set
+          multiple colors using a pipe, each with its own condition, like this:
+          `color<value|color:null`. The following operators are
+          supported: >, <, >=, <=, =, :null (to check if no value exists).
+          Each color can be specified in the same manner as for the `label_color` parameter.
+          Currently only integers are supported as values."
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ format: any text
+ - name: text_color_lbl
+ in: query
+        description: "Font color for the label *(left)* part of the badge. One of the predefined colors, or an HTML
+          hexadecimal color without the preceding `#` character (allowed formats: `RGB` or `RRGGBB`). If no value or
+          a wrong value is given, the default color will be used."
+ required: false
+ allowEmptyValue: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - green
+ - brightgreen
+ - yellow
+ - yellowgreen
+ - orange
+ - red
+ - blue
+ - grey
+ - gray
+ - lightgrey
+ - lightgray
+ - type: string
+ format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+ - name: text_color_val
+ in: query
+        description: "Font color for the value *(right)* part of the badge. One of the predefined colors, or an HTML
+          hexadecimal color without the preceding `#` character (allowed formats: `RGB` or `RRGGBB`). If no value or
+          a wrong value is given, the default color will be used."
+ required: false
+ allowEmptyValue: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - green
+ - brightgreen
+ - yellow
+ - yellowgreen
+ - orange
+ - red
+ - blue
+ - grey
+ - gray
+ - lightgrey
+ - lightgray
+ - type: string
+ format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+ - name: multiply
+ in: query
+        description: Multiply the value by this number before rendering it on the image
+ (integer value required).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: number
+ format: integer
+ - name: divide
+ in: query
+        description: Divide the value by this number before rendering it on the image
+ (integer value required).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: number
+ format: integer
+ - name: scale
+ in: query
+        description: Set the scale of the badge (greater than or equal to 100).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: number
+ format: integer
+ - name: fixed_width_lbl
+ in: query
+ description: "This parameter overrides auto-sizing of badge and creates it with fixed width.
+ This parameter determines the size of the label's left side *(label/name)*.
+ You must set this parameter together with `fixed_width_val` otherwise it will be ignored.
+ You should set the label/value widths wide enough to provide space for all the possible values/contents of
+ the badge you're requesting. In case the text cannot fit the space given it will be clipped.
+ The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`."
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ - name: fixed_width_val
+ in: query
+ description: "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter
+ determines the size of the label's right side *(value)*. You must set this parameter together with
+ `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to
+ provide space for all the possible values/contents of the badge you're requesting. In case the text cannot
+ fit the space given it will be clipped. The `scale` parameter still applies on the values you give to
+ `fixed_width_lbl` and `fixed_width_val`."
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ responses:
+ "200":
+ description: The call was successful. The response should be an SVG image.
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: No chart with the given id is found.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
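+    # A hedged sketch of the conditional value_color syntax described above
+    # (hypothetical local agent; quote the URL so the shell does not interpret
+    # the pipe characters):
+    #   curl 'http://localhost:19999/api/v1/badge.svg?chart=system.cpu&after=-600&label=cpu&value_color=grey:null|green<30|orange<75|red'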
+ /allmetrics:
+ get:
+      summary: Get the latest value of all the metrics maintained by netdata
+ description: The allmetrics endpoint returns the latest value of all charts and
+ dimensions stored in the netdata server.
+ parameters:
+ - name: format
+ in: query
+ description: The format of the response to be returned.
+ required: true
+ schema:
+ type: string
+ enum:
+ - shell
+ - prometheus
+ - prometheus_all_hosts
+ - json
+ default: shell
+ - name: filter
+ in: query
+        description: Allows filtering the charts to be returned, using Netdata simple patterns.
+ required: false
+ schema:
+ type: string
+ format: any text
+ - name: variables
+ in: query
+ description: When enabled, netdata will expose various system
+ configuration metrics.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: no
+ - name: help
+ in: query
+ description: Enable or disable HELP lines in prometheus output.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: no
+ - name: types
+ in: query
+ description: Enable or disable TYPE lines in prometheus output.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: no
+ - name: timestamps
+ in: query
+ description: Enable or disable timestamps in prometheus output.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: names
+ in: query
+        description: When enabled, netdata will report dimension names. When disabled,
+          netdata will report dimension IDs. The default is controlled in
+ netdata.conf.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: oldunits
+ in: query
+ description: When enabled, netdata will show metric names for the default
+ source=average as they appeared before 1.12, by using the legacy
+ unit naming conventions.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: hideunits
+ in: query
+ description: When enabled, netdata will not include the units in the metric
+ names, for the default source=average.
+ required: false
+ schema:
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: server
+ in: query
+ description: Set a distinct name of the client querying prometheus metrics.
+ Netdata will use the client IP if this is not set.
+ required: false
+ schema:
+ type: string
+ format: any text
+ - name: prefix
+ in: query
+ description: Prefix all prometheus metrics with this string.
+ required: false
+ schema:
+ type: string
+ format: any text
+ - name: data
+ in: query
+ description: Select the prometheus response data source. There is a setting in
+ netdata.conf for the default.
+ required: false
+ schema:
+ type: string
+ enum:
+ - as-collected
+ - average
+ - sum
+ default: average
+ responses:
+ "200":
+ description: All the metrics returned in the format requested.
+ "400":
+ description: The format requested is not supported.
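+    # Two hedged sketches (hypothetical local agent): the shell format is
+    # meant to be sourced by scripts, and the prometheus format honours the
+    # help/types/prefix parameters documented above:
+    #   curl 'http://localhost:19999/api/v1/allmetrics?format=shell'
+    #   curl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&prefix=netdata'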
+ /alarms:
+ get:
+ summary: Get a list of active or raised alarms on the server
+ description: The alarms endpoint returns the list of all raised or enabled alarms on
+ the netdata server. Called without any parameters, the raised alarms in
+ state WARNING or CRITICAL are returned. By passing "?all", all the
+ enabled alarms are returned.
+ parameters:
+ - name: all
+ in: query
+ description: If passed, all enabled alarms are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ - name: active
+ in: query
+ description: If passed, the raised alarms in state WARNING or CRITICAL are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ responses:
+ "200":
+ description: An object containing general info and a linked list of alarms.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarms"
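+    # A minimal sketch (hypothetical local agent): without parameters only
+    # alarms in WARNING or CRITICAL state are returned; ?all lists every
+    # enabled alarm:
+    #   curl 'http://localhost:19999/api/v1/alarms?all'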
+ /alarms_values:
+ get:
+ summary: Get a list of active or raised alarms on the server
+ description: "The alarms_values endpoint returns the list of all raised or enabled alarms on
+ the netdata server. Called without any parameters, the raised alarms in
+ state WARNING or CRITICAL are returned. By passing '?all', all the
+ enabled alarms are returned.
+        The output of this endpoint differs from `/alarms` in the number of variables delivered: it gives
+        the user the `id`, `value`, `last_updated` time, and alarm `status`."
+ parameters:
+ - name: all
+ in: query
+ description: If passed, all enabled alarms are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ - name: active
+ in: query
+ description: If passed, the raised alarms in state WARNING or CRITICAL are returned.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: boolean
+ responses:
+ "200":
+ description: An object containing general info and a linked list of alarms.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarms_values"
+ /alarm_log:
+ get:
+ summary: Retrieves the entries of the alarm log
+ description: Returns an array of alarm_log entries, with historical information on
+ raised and cleared alarms.
+ parameters:
+ - name: after
+ in: query
+ description: Passing the parameter after=UNIQUEID returns all the events in the
+ alarm log that occurred after UNIQUEID. An automated series of calls
+ would call the interface once without after=, store the last
+          UNIQUEID of the returned set, and pass it back on the next call to
+          incrementally fetch the subsequent events.
+ required: false
+ schema:
+ type: integer
+ responses:
+ "200":
+ description: An array of alarm log entries.
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "#/components/schemas/alarm_log_entry"
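+    # A sketch of the incremental polling pattern described above
+    # (hypothetical local agent; 123456 stands in for the last unique_id
+    # seen in the previous response):
+    #   curl 'http://localhost:19999/api/v1/alarm_log'
+    #   curl 'http://localhost:19999/api/v1/alarm_log?after=123456'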
+ /alarm_count:
+ get:
+ summary: Get an overall status of the chart
+      description: Checks multiple charts with the same context and counts the number
+        of alarms with the given status.
+ parameters:
+ - in: query
+ name: context
+ description: Specify context which should be checked.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ default:
+ - system.cpu
+ - in: query
+ name: status
+ description: Specify alarm status to count.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ enum:
+ - REMOVED
+ - UNDEFINED
+ - UNINITIALIZED
+ - CLEAR
+ - RAISED
+ - WARNING
+ - CRITICAL
+ default: RAISED
+ responses:
+ "200":
+ description: An object containing a count of alarms with given status for given
+ contexts.
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ type: number
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
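+    # A minimal sketch using the documented defaults (hypothetical local agent):
+    #   curl 'http://localhost:19999/api/v1/alarm_count?context=system.cpu&status=WARNING'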
+ /manage/health:
+ get:
+ summary: "Accesses the health management API to control health checks and
+ notifications at runtime."
+ description: "Available from Netdata v1.12 and above, protected via bearer
+ authorization. Especially useful for maintenance periods, the API allows
+ you to disable health checks completely, silence alarm notifications, or
+        disable or silence specific alarms that match selectors on alarm/template
+ name, chart, context, host and family. For the simple disable/silence
+ all scenarios, only the cmd parameter is required. The other parameters
+ are used to define alarm selectors. For more information and examples,
+ refer to the netdata documentation."
+ parameters:
+ - name: cmd
+ in: query
+ description: "DISABLE ALL: No alarm criteria are evaluated, nothing is written in
+ the alarm log. SILENCE ALL: No notifications are sent. RESET: Return
+ to the default state. DISABLE/SILENCE: Set the mode to be used for
+ the alarms matching the criteria of the alarm selectors. LIST: Show
+ active configuration."
+ required: false
+ schema:
+ type: string
+ enum:
+ - DISABLE ALL
+ - SILENCE ALL
+ - DISABLE
+ - SILENCE
+ - RESET
+ - LIST
+ - name: alarm
+ in: query
+ description: The expression provided will match both `alarm` and `template` names.
+ schema:
+ type: string
+ - name: chart
+ in: query
+ description: Chart ids/names, as shown on the dashboard. These will match the
+ `on` entry of a configured `alarm`.
+ schema:
+ type: string
+ - name: context
+ in: query
+ description: Chart context, as shown on the dashboard. These will match the `on`
+ entry of a configured `template`.
+ schema:
+ type: string
+ - name: hosts
+ in: query
+ description: The hostnames that will need to match.
+ schema:
+ type: string
+ - name: families
+ in: query
+ description: The alarm families.
+ schema:
+ type: string
+ responses:
+ "200":
+ description: A plain text response based on the result of the command.
+ "403":
+ description: Bearer authentication error.
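+    # A hedged sketch of silencing everything during maintenance and then
+    # resetting (hypothetical local agent; how the bearer token is issued is
+    # configuration-specific - see the netdata health documentation):
+    #   curl -H 'Authorization: Bearer <token>' 'http://localhost:19999/api/v1/manage/health?cmd=SILENCE%20ALL'
+    #   curl -H 'Authorization: Bearer <token>' 'http://localhost:19999/api/v1/manage/health?cmd=RESET'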
+ /aclk:
+ get:
+ summary: Get information about current ACLK state
+      description: "The ACLK endpoint returns detailed information
+        about the current state of the ACLK (the Agent to Cloud communication link)."
+ responses:
+ "200":
+ description: JSON object with ACLK information.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/aclk_state"
+ /metric_correlations:
+ get:
+ summary: "Analyze all the metrics to find their correlations"
+ description: "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint.
+ Given two time-windows (baseline, highlight), it goes
+        through all the available metrics, querying both windows, and tries to find
+        how the two windows relate to each other. It supports
+        multiple algorithms to do so. The result is a list of all
+        metrics evaluated, weighted from 0.0 (the two windows are
+        very different) to 1.0 (the two windows are similar).
+        The algorithm automatically adjusts the baseline window to be
+        a power-of-two multiple of the highlighted one (1, 2, 4, 8, etc.)."
+ parameters:
+ - name: baseline_after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+          starting point of the baseline window, or a relative number of
+          seconds (negative, relative to parameter baseline_before). Netdata will
+          assume it is a relative number if it is less than 3 years (in seconds).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -300
+ - name: baseline_before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the baseline window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+          starting point of the highlighted window, or a relative number of
+          seconds (negative, relative to parameter: before). Netdata will
+          assume it is a relative number if it is less than 3 years (in seconds).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the highlighted window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: points
+ in: query
+ description: The number of points to be evaluated for the highlighted window.
+ The baseline window will be adjusted automatically to receive a proportional
+ amount of points.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 500
+ - name: method
+ in: query
+ description: the algorithm to run
+ required: false
+ schema:
+ type: string
+ enum:
+ - ks2
+ - volume
+ default: ks2
+ - name: timeout
+ in: query
+ description: Cancel the query if it takes more than this amount of milliseconds.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 60000
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - min2max
+ - abs
+ - absolute
+ - absolute-sum
+ - null2zero
+ - percentage
+ - unaligned
+ - allow_past
+ - nonzero
+ - anomaly-bit
+ - raw
+ default:
+ - null2zero
+ - allow_past
+ - nonzero
+ - unaligned
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+ in order to return fewer points, this parameter defines the method
+ of grouping. Methods supported: "min", "max", "average", "sum",
+ "incremental-sum". "max" is actually calculated on the absolute
+ value collected (so it works for both positive and negative
+ dimensions, returning the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ - name: group_options
+ in: query
+ description: When the group function supports additional parameters, this field
+ can be used to pass them to it. Currently only "countif" supports this.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ responses:
+ "200":
+ description: JSON object with weights for each chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/metric_correlations"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metric correlations are not enabled on this Netdata Agent.
+ "404":
+ description: No charts could be found, or the method
+ that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
+ /function:
+ get:
+ summary: "Execute a collector function."
+ parameters:
+ - name: function
+ in: query
+ description: The name of the function, as returned by the collector.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ - name: timeout
+ in: query
+ description: The timeout in seconds to wait for the function to complete.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 10
+ responses:
+ "200":
+ description: The collector function has been executed successfully. Each collector may return a different type of content.
+ "400":
+ description: The request was rejected by the collector.
+ "404":
+ description: The requested function is not found.
+ "500":
+ description: Other internal error, getting this error means there is a bug in Netdata.
+ "503":
+ description: The collector to execute the function is not currently available.
+ "504":
+ description: Timeout while waiting for the collector to execute the function.
+ "591":
+ description: The collector sent a response, but it was invalid or corrupted.
+ /functions:
+ get:
+ summary: Get a list of all registered collector functions.
+ description: Collector functions are programs that can be executed on demand.
+ responses:
+ "200":
+ description: A JSON object containing one object per supported function.
+ /weights:
+ get:
+ summary: "Analyze all the metrics using an algorithm and score them accordingly"
+ description: "This endpoint goes through all metrics and scores them according to an algorithm."
+ parameters:
+ - name: baseline_after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the baseline window, or a relative number of
+ seconds (negative, relative to parameter baseline_before). Netdata will
+ assume it is a relative number if it is less than 3 years (in seconds).
+ This parameter is used in KS2 and VOLUME algorithms.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -300
+ - name: baseline_before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the baseline window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ This parameter is used in KS2 and VOLUME algorithms.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the highlighted window, or a relative number of
+ seconds (negative, relative to parameter before). Netdata will
+ assume it is a relative number if it is less than 3 years (in seconds).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the highlighted window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: context
+ in: query
+ description: A simple pattern matching the contexts to evaluate.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ - name: points
+ in: query
+ description: The number of points to be evaluated for the highlighted window.
+ The baseline window will be adjusted automatically to receive a proportional
+ amount of points.
+ This parameter is only used by the KS2 algorithm.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 500
+ - name: method
+ in: query
+ description: the algorithm to run
+ required: false
+ schema:
+ type: string
+ enum:
+ - ks2
+ - volume
+ - anomaly-rate
+ default: anomaly-rate
+ - name: tier
+ in: query
+ description: Use the specified database tier
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ - name: timeout
+ in: query
+ description: Cancel the query if it takes more than this amount of milliseconds.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 60000
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - min2max
+ - abs
+ - absolute
+ - absolute-sum
+ - null2zero
+ - percentage
+ - unaligned
+ - nonzero
+ - anomaly-bit
+ - raw
+ default:
+ - null2zero
+ - nonzero
+ - unaligned
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+ in order to return fewer points, this parameter defines the method
+ of grouping. Methods supported: "min", "max", "average", "sum",
+ "incremental-sum". "max" is actually calculated on the absolute
+ value collected (so it works for both positive and negative
+ dimensions, returning the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ - name: group_options
+ in: query
+ description: When the group function supports additional parameters, this field
+ can be used to pass them to it. Currently only "countif" supports this.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ responses:
+ "200":
+ description: JSON object with weights for each context, chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/weights"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metric correlations are not enabled on this Netdata Agent.
+ "404":
+ description: No charts could be found, or the method
+ that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
+servers:
+ - url: https://registry.my-netdata.io/api/v1
+ - url: http://registry.my-netdata.io/api/v1
+components:
+ schemas:
+ info:
+ type: object
+ properties:
+ version:
+ type: string
+ description: netdata version of the server.
+ example: 1.11.1_rolling
+ uid:
+ type: string
+ description: netdata unique id of the server.
+ example: 24e9fe3c-f2ac-11e8-bafc-0242ac110002
+ mirrored_hosts:
+ type: array
+ description: List of hosts mirrored by this server (including itself).
+ items:
+ type: string
+ example:
+ - host1.example.com
+ - host2.example.com
+ mirrored_hosts_status:
+ type: array
+ description: >-
+ List of details of hosts mirrored to this server (including self).
+ Indexes correspond to indexes in "mirrored_hosts".
+ items:
+ type: object
+ description: Host data
+ properties:
+ guid:
+ type: string
+ format: uuid
+ nullable: false
+ description: Host unique GUID from `netdata.public.unique.id`.
+ example: 245e4bff-3b34-47c1-a6e5-5c535a9abfb2
+ reachable:
+ type: boolean
+ nullable: false
+ description: Current state of streaming. Always true for localhost/self.
+ claim_id:
+ type: string
+ format: uuid
+ nullable: true
+ description: >-
+ Cloud GUID/identifier in case the host is claimed.
+ If the child status is unknown, or the host is unclaimed, this field is set to `null`.
+ example: c3b2a66a-3052-498c-ac52-7fe9e8cccb0c
+ os_name:
+ type: string
+ description: Operating System Name.
+ example: Manjaro Linux
+ os_id:
+ type: string
+ description: Operating System ID.
+ example: manjaro
+ os_id_like:
+ type: string
+ description: Known OS similar to this OS.
+ example: arch
+ os_version:
+ type: string
+ description: Operating System Version.
+ example: 18.0.4
+ os_version_id:
+ type: string
+ description: Operating System Version ID.
+ example: unknown
+ os_detection:
+ type: string
+ description: OS parameters detection method.
+ example: Mixed
+ kernel_name:
+ type: string
+ description: Kernel Name.
+ example: Linux
+ kernel_version:
+ type: string
+ description: Kernel Version.
+ example: 4.19.32-1-MANJARO
+ is_k8s_node:
+ type: boolean
+ description: Netdata is running on a K8s node.
+ example: false
+ architecture:
+ type: string
+ description: Kernel architecture.
+ example: x86_64
+ virtualization:
+ type: string
+ description: Virtualization Type.
+ example: kvm
+ virt_detection:
+ type: string
+ description: Virtualization detection method.
+ example: systemd-detect-virt
+ container:
+ type: string
+ description: Container technology.
+ example: docker
+ container_detection:
+ type: string
+ description: Container technology detection method.
+ example: dockerenv
+ stream_compression:
+ type: boolean
+ description: Whether stream transmission compression is enabled.
+ example: true
+ labels:
+ type: object
+ description: List of host labels.
+ properties:
+ app:
+ type: string
+ description: Host label.
+ example: netdata
+ collectors:
+ type: array
+ items:
+ type: object
+ description: Array of collector plugins and modules.
+ properties:
+ plugin:
+ type: string
+ description: Collector plugin.
+ example: python.d.plugin
+ module:
+ type: string
+ description: Module of the collector plugin.
+ example: dockerd
+ alarms:
+ type: object
+ description: Number of alarms in the server.
+ properties:
+ normal:
+ type: integer
+ description: Number of alarms in normal state.
+ warning:
+ type: integer
+ description: Number of alarms in warning state.
+ critical:
+ type: integer
+ description: Number of alarms in critical state.
+ chart_summary:
+ type: object
+ properties:
+ hostname:
+ type: string
+ description: The hostname of the netdata server.
+ version:
+ type: string
+ description: netdata version of the server.
+ release_channel:
+ type: string
+ description: The release channel of the build on the server.
+ example: nightly
+ timezone:
+ type: string
+ description: The current timezone on the server.
+ os:
+ type: string
+ description: The netdata server host operating system.
+ enum:
+ - macos
+ - linux
+ - freebsd
+ history:
+ type: number
+ description: The duration, in seconds, of the round robin database maintained by
+ netdata.
+ memory_mode:
+ type: string
+ description: The name of the database memory mode on the server.
+ update_every:
+ type: number
+ description: The default update frequency of the netdata server. All charts have
+ an update frequency equal to or larger than this.
+ charts:
+ type: object
+ description: An object containing all the chart objects available at the netdata
+ server. This is used as an indexed array. The key of each chart
+ object is the id of the chart.
+ additionalProperties:
+ $ref: "#/components/schemas/chart"
+ charts_count:
+ type: number
+ description: The number of charts.
+ dimensions_count:
+ type: number
+ description: The total number of dimensions.
+ alarms_count:
+ type: number
+ description: The number of alarms.
+ rrd_memory_bytes:
+ type: number
+ description: The size of the round robin database in bytes.
+ chart:
+ type: object
+ properties:
+ id:
+ type: string
+ description: The unique id of the chart.
+ name:
+ type: string
+ description: The name of the chart.
+ type:
+ type: string
+ description: The type of the chart. Types are not handled by netdata. You can use
+ this field for anything you like.
+ family:
+ type: string
+ description: The family of the chart. Families are not handled by netdata. You
+ can use this field for anything you like.
+ title:
+ type: string
+ description: The title of the chart.
+ priority:
+ type: number
+ description: The relative priority of the chart. Netdata does not care about
+ priorities. This is just an indication of importance for the chart
+ viewers to sort charts of higher priority (lower number) closer to
+ the top. Priority sorting should only be used among charts of the
+ same type or family.
+ enabled:
+ type: boolean
+ description: True when the chart is enabled. Disabled charts do not currently
+ collect values, but they may have historical values available.
+ units:
+ type: string
+ description: The unit of measurement for the values of all dimensions of the
+ chart.
+ data_url:
+ type: string
+ description: The absolute path to get data values for this chart. You are
+ expected to use this path as the base when constructing the URL to
+ fetch data values for this chart.
+ chart_type:
+ type: string
+ description: The chart type.
+ enum:
+ - line
+ - area
+ - stacked
+ duration:
+ type: number
+ description: The duration, in seconds, of the round robin database maintained by
+ netdata.
+ first_entry:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) in the round
+ robin database.
+ last_entry:
+ type: number
+ description: The UNIX timestamp of the latest entry in the round robin database.
+ update_every:
+ type: number
+ description: The update frequency of this chart, in seconds. One value every this
+ amount of time is kept in the round robin database.
+ dimensions:
+ type: object
+ description: "An object containing all the chart dimensions available for the
+ chart. This is used as an indexed array. For each pair in the
+ dictionary: the key is the id of the dimension and the value is a
+ dictionary containing the name."
+ additionalProperties:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The name of the dimension
+ chart_variables:
+ type: object
+ additionalProperties:
+ $ref: "#/components/schemas/chart_variables"
+ green:
+ type: number
+ nullable: true
+ description: Chart health green threshold.
+ red:
+ type: number
+ nullable: true
+ description: Chart health red threshold.
+ context_summary:
+ type: object
+ properties:
+ hostname:
+ type: string
+ description: The hostname of the netdata server.
+ machine_guid:
+ type: string
+ description: The unique installation id of this netdata server.
+ node_id:
+ type: string
+ description: The unique node id of this netdata server at the hub.
+ claim_id:
+ type: string
+ description: The unique handshake id of this netdata server and the hub.
+ host_labels:
+ type: object
+ description: The host labels associated with this netdata server.
+ context:
+ type: object
+ description: "An object containing all the context objects available at the netdata server.
+ This is used as an indexed array. The key of each context object is the id of the context."
+ additionalProperties:
+ $ref: "#/components/schemas/context"
+ context:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "The version of this context.
+ The number are not sequential, but bigger numbers depict a newer object."
+ hub_version:
+ type: string
+ description: The version of this context, as known by hub.
+ family:
+ type: string
+ description: "The family of the context. When multiple charts of a context have different families,
+ the netdata server replaces the different parts with [x], so that the context can have only one family."
+ title:
+ type: string
+ description: "The title of the context. When multiple charts of a context have different titles,
+ the netdata server replaces the different parts with [x], so that the context can have only one title."
+ priority:
+ type: number
+ description: "The relative priority of the context. When multiple contexts have different priorities,
+ the minimum among them is selected as the priority of the context."
+ units:
+ type: string
+ description: "The unit of measurement for the values of all dimensions of the context. If multiple charts
+ of context have different units, the latest collected is selected."
+ chart_type:
+ type: string
+ description: The chart type.
+ enum:
+ - line
+ - area
+ - stacked
+ first_time_t:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) in the database.
+ last_time_t:
+ type: number
+ description: The UNIX timestamp of the latest entry in the database.
+ charts:
+ type: object
+ description: "An object containing all the charts available for the chart. This is used as an indexed array.
+ For each pair in the dictionary, the key is the id of the chart and the value provides all details about
+ the chart."
+ alarm_variables:
+ type: object
+ properties:
+ chart:
+ type: string
+ description: The unique id of the chart.
+ chart_name:
+ type: string
+ description: The name of the chart.
+ chart_context:
+ type: string
+ description: The context of the chart. It is shared across multiple monitored
+ software or hardware instances and used in alarm templates.
+ family:
+ type: string
+ description: The family of the chart.
+ host:
+ type: string
+ description: The host containing the chart.
+ chart_variables:
+ type: object
+ additionalProperties:
+ $ref: "#/components/schemas/chart_variables"
+ family_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
+ host_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
+ chart_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
+ data:
+ type: object
+ discriminator:
+ propertyName: format
+ description: Response will contain the appropriate subtype, e.g. data_json depending
+ on the requested format.
+ properties:
+ api:
+ type: number
+ description: The API version this conforms to, currently 1.
+ id:
+ type: string
+ description: The unique id of the chart.
+ name:
+ type: string
+ description: The name of the chart.
+ update_every:
+ type: number
+ description: The update frequency of this chart, in seconds. One value every this
+ amount of time is kept in the round robin database (independently of
+ the current view).
+ view_update_every:
+ type: number
+ description: The current view appropriate update frequency of this chart, in
+ seconds. There is no point to request chart refreshes, using the
+ same settings, more frequently than this.
+ first_entry:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) in the round
+ robin database (independently of the current view).
+ last_entry:
+ type: number
+ description: The UNIX timestamp of the latest entry in the round robin database
+ (independently of the current view).
+ after:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) returned in this
+ response.
+ before:
+ type: number
+ description: The UNIX timestamp of the latest entry returned in this response.
+ min:
+ type: number
+ description: The minimum value returned in the current view. This can be used to
+ size the y-series of the chart.
+ max:
+ type: number
+ description: The maximum value returned in the current view. This can be used to
+ size the y-series of the chart.
+ dimension_names:
+ description: The dimension names of the chart as returned in the current view.
+ type: array
+ items:
+ type: string
+ dimension_ids:
+ description: The dimension IDs of the chart as returned in the current view.
+ type: array
+ items:
+ type: string
+ latest_values:
+ description: The latest values collected for the chart (independently of the
+ current view).
+ type: array
+ items:
+ type: string
+ view_latest_values:
+ description: The latest values returned with this response.
+ type: array
+ items:
+ type: string
+ dimensions:
+ type: number
+ description: The number of dimensions returned.
+ points:
+ type: number
+ description: The number of rows / points returned.
+ format:
+ type: string
+ description: The format of the result returned.
+ chart_variables:
+ type: object
+ additionalProperties:
+ $ref: "#/components/schemas/chart_variables"
+ data_json:
+ description: Data response in json format.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: object
+ properties:
+ labels:
+ description: The dimensions retrieved from the chart.
+ type: array
+ items:
+ type: string
+ data:
+ description: The data requested, one element per sample with each element
+ containing the values of the dimensions described in the
+ labels value.
+ type: array
+ items:
+ type: number
+ description: The result requested, in the format requested.
+ data_flat:
+ description: Data response in csv / tsv / tsv-excel / ssv / ssv-comma / markdown /
+ html formats.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: string
+ data_array:
+ description: Data response in array format.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: array
+ items:
+ type: number
+ data_csvjsonarray:
+ description: Data response in csvjsonarray format.
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ description: The first inner array contains strings showing the labels of
+ each column, each subsequent array contains the values for each
+ point in time.
+ type: array
+ items:
+ type: array
+ items: {}
+ data_datatable:
+ description: Data response in datatable / datasource formats (suitable for Google
+ Charts).
+ allOf:
+ - $ref: "#/components/schemas/data"
+ - properties:
+ result:
+ type: object
+ properties:
+ cols:
+ type: array
+ items:
+ type: object
+ properties:
+ id:
+ description: Always empty - for future use.
+ label:
+ description: The dimension returned from the chart.
+ pattern:
+ description: Always empty - for future use.
+ type:
+ description: The type of data in the column / chart-dimension.
+ p:
+ description: Contains any annotations for the column.
+ required:
+ - id
+ - label
+ - pattern
+ - type
+ rows:
+ type: array
+ items:
+ type: object
+ properties:
+ c:
+ type: array
+ items:
+ properties:
+ v:
+ description: "Each value in the row is represented by an
+ object named `c` with five v fields: data, null,
+ null, 0, the value. This format is fixed by the
+ Google Charts API."
+ alarms:
+ type: object
+ properties:
+ hostname:
+ type: string
+ latest_alarm_log_unique_id:
+ type: integer
+ format: int32
+ status:
+ type: boolean
+ now:
+ type: integer
+ format: int32
+ alarms:
+ type: object
+ properties:
+ chart-name.alarm-name:
+ type: object
+ properties:
+ id:
+ type: integer
+ format: int32
+ name:
+ type: string
+ description: Full alarm name.
+ chart:
+ type: string
+ family:
+ type: string
+ active:
+ type: boolean
+ description: Will be false only if the alarm is disabled in the
+ configuration.
+ disabled:
+ type: boolean
+ description: Whether the health check for this alarm has been disabled
+ via a health command API DISABLE command.
+ silenced:
+ type: boolean
+ description: Whether notifications for this alarm have been silenced via
+ a health command API SILENCE command.
+ exec:
+ type: string
+ recipient:
+ type: string
+ source:
+ type: string
+ units:
+ type: string
+ info:
+ type: string
+ status:
+ type: string
+ last_status_change:
+ type: integer
+ format: int32
+ last_updated:
+ type: integer
+ format: int32
+ next_update:
+ type: integer
+ format: int32
+ update_every:
+ type: integer
+ format: int32
+ delay_up_duration:
+ type: integer
+ format: int32
+ delay_down_duration:
+ type: integer
+ format: int32
+ delay_max_duration:
+ type: integer
+ format: int32
+ delay_multiplier:
+ type: integer
+ format: int32
+ delay:
+ type: integer
+ format: int32
+ delay_up_to_timestamp:
+ type: integer
+ format: int32
+ value_string:
+ type: string
+ no_clear_notification:
+ type: boolean
+ lookup_dimensions:
+ type: string
+ db_after:
+ type: integer
+ format: int32
+ db_before:
+ type: integer
+ format: int32
+ lookup_method:
+ type: string
+ lookup_after:
+ type: integer
+ format: int32
+ lookup_before:
+ type: integer
+ format: int32
+ lookup_options:
+ type: string
+ calc:
+ type: string
+ calc_parsed:
+ type: string
+ warn:
+ type: string
+ warn_parsed:
+ type: string
+ crit:
+ type: string
+ crit_parsed:
+ type: string
+ warn_repeat_every:
+ type: integer
+ format: int32
+ crit_repeat_every:
+ type: integer
+ format: int32
+ green:
+ type: string
+ format: nullable
+ red:
+ type: string
+ format: nullable
+ value:
+ type: number
+ alarm_log_entry:
+ type: object
+ properties:
+ hostname:
+ type: string
+ unique_id:
+ type: integer
+ format: int32
+ alarm_id:
+ type: integer
+ format: int32
+ alarm_event_id:
+ type: integer
+ format: int32
+ name:
+ type: string
+ chart:
+ type: string
+ family:
+ type: string
+ processed:
+ type: boolean
+ updated:
+ type: boolean
+ exec_run:
+ type: integer
+ format: int32
+ exec_failed:
+ type: boolean
+ exec:
+ type: string
+ recipient:
+ type: string
+ exec_code:
+ type: integer
+ format: int32
+ source:
+ type: string
+ units:
+ type: string
+ when:
+ type: integer
+ format: int32
+ duration:
+ type: integer
+ format: int32
+ non_clear_duration:
+ type: integer
+ format: int32
+ status:
+ type: string
+ old_status:
+ type: string
+ delay:
+ type: integer
+ format: int32
+ delay_up_to_timestamp:
+ type: integer
+ format: int32
+ updated_by_id:
+ type: integer
+ format: int32
+ updates_id:
+ type: integer
+ format: int32
+ value_string:
+ type: string
+ old_value_string:
+ type: string
+ silenced:
+ type: string
+ info:
+ type: string
+ value:
+ type: number
+ nullable: true
+ old_value:
+ type: number
+ nullable: true
+ alarms_values:
+ type: object
+ properties:
+ hostname:
+ type: string
+ alarms:
+ type: object
+ description: HashMap with keys being alarm names
+ additionalProperties:
+ type: object
+ properties:
+ id:
+ type: integer
+ value:
+ type: integer
+ last_updated:
+ type: integer
+ format: int32
+ status:
+ type: string
+ enum:
+ - REMOVED
+ - UNDEFINED
+ - UNINITIALIZED
+ - CLEAR
+ - RAISED
+ - WARNING
+ - CRITICAL
+ - UNKNOWN
+ aclk_state:
+ type: object
+ properties:
+ aclk-available:
+ type: string
+ description: "Describes whether this agent is capable of connection to the Cloud.
+ False means agent has been built without ACLK component either on purpose (user choice)
+ or due to missing dependency."
+ aclk-version:
+ type: integer
+ description: Describes which ACLK version is currently used.
+ protocols-supported:
+ type: array
+ description: List of supported protocols for communication with Cloud.
+ items:
+ type: string
+ agent-claimed:
+ type: boolean
+ description: Informs whether this agent has been added to a space in the cloud (the user has to perform
+ claiming). If false (the user didn't perform claiming), the agent will never attempt any cloud connection.
+ claimed_id:
+ type: string
+ format: uuid
+ description: Unique ID this agent uses to identify itself when connecting to the cloud.
+ online:
+ type: boolean
+ description: Informs if this agent was connected to the cloud at the time this request was processed.
+ used-cloud-protocol:
+ type: string
+ description: Informs which protocol is used to communicate with the cloud.
+ enum:
+ - Old
+ - New
+ metric_correlations:
+ type: object
+ properties:
+ after:
+ description: the start time of the highlighted window
+ type: integer
+ before:
+ description: the end time of the highlighted window
+ type: integer
+ duration:
+ description: the duration of the highlighted window
+ type: integer
+ points:
+ description: the points of the highlighted window
+ type: integer
+ baseline_after:
+ description: the start time of the baseline window
+ type: integer
+ baseline_before:
+ description: the end time of the baseline window
+ type: integer
+ baseline_duration:
+ description: the duration of the baseline window
+ type: integer
+ baseline_points:
+ description: the points of the baseline window
+ type: integer
+ group:
+ description: the grouping method across time
+ type: string
+ method:
+ description: the correlation method used
+ type: string
+ options:
+ description: a comma separated list of the query options set
+ type: string
+ correlated_dimensions:
+ description: the number of dimensions returned in the result
+ type: integer
+ total_dimensions_count:
+ description: the total number of dimensions evaluated
+ type: integer
+ statistics:
+ type: object
+ properties:
+ query_time_ms:
+ type: number
+ db_queries:
+ type: integer
+ db_points_read:
+ type: integer
+ query_result_points:
+ type: integer
+ binary_searches:
+ type: integer
+ correlated_charts:
+ type: object
+ description: An object containing chart objects with their metrics correlations.
+ properties:
+ chart-id1:
+ type: object
+ properties:
+ context:
+ type: string
+ dimensions:
+ type: object
+ properties:
+ dimension1-name:
+ type: number
+ dimension2-name:
+ type: number
+ chart-id2:
+ type: object
+ properties:
+ context:
+ type: string
+ dimensions:
+ type: object
+ properties:
+ dimension1-name:
+ type: number
+ dimension2-name:
+ type: number
+ weights:
+ type: object
+ properties:
+ after:
+ description: the start time of the highlighted window
+ type: integer
+ before:
+ description: the end time of the highlighted window
+ type: integer
+ duration:
+ description: the duration of the highlighted window
+ type: integer
+ points:
+ description: the points of the highlighted window
+ type: integer
+ baseline_after:
+ description: the start time of the baseline window
+ type: integer
+ baseline_before:
+ description: the end time of the baseline window
+ type: integer
+ baseline_duration:
+ description: the duration of the baseline window
+ type: integer
+ baseline_points:
+ description: the points of the baseline window
+ type: integer
+ group:
+ description: the grouping method across time
+ type: string
+ method:
+ description: the correlation method used
+ type: string
+ options:
+ description: a comma separated list of the query options set
+ type: string
+ correlated_dimensions:
+ description: the number of dimensions returned in the result
+ type: integer
+ total_dimensions_count:
+ description: the total number of dimensions evaluated
+ type: integer
+ statistics:
+ type: object
+ properties:
+ query_time_ms:
+ type: number
+ db_queries:
+ type: integer
+ db_points_read:
+ type: integer
+ query_result_points:
+ type: integer
+ binary_searches:
+ type: integer
+ contexts:
+ description: A dictionary of weighted context objects.
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/weighted_context'
+ weighted_context:
+ type: object
+ properties:
+ weight:
+ description: The average weight of the context.
+ type: number
+ charts:
+ description: A dictionary of weighted chart objects.
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/weighted_chart'
+ weighted_chart:
+ type: object
+ properties:
+ weight:
+ description: The average weight of the context.
+ type: number
+ dimensions:
+ description: A dictionary of weighted dimensions.
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/weighted_dimension'
+ weighted_dimension:
+ type: number
diff --git a/web/api/queries/Makefile.am b/web/api/queries/Makefile.am
new file mode 100644
index 0000000..7c4c435
--- /dev/null
+++ b/web/api/queries/Makefile.am
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ average \
+ countif \
+ des \
+ incremental_sum \
+ max \
+ min \
+ sum \
+ median \
+ percentile \
+ ses \
+ stddev \
+ trimmed_mean \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/README.md b/web/api/queries/README.md
new file mode 100644
index 0000000..44cdd05
--- /dev/null
+++ b/web/api/queries/README.md
@@ -0,0 +1,176 @@
+<!--
+title: "Database Queries"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/README.md
+-->
+
+# Database Queries
+
+The Netdata database can be queried with the `/api/v1/data` and `/api/v1/badge.svg` REST API methods.
+
+Every data query accepts the following parameters:
+
+|name|required|description|
+|:--:|:------:|:----------|
+|`chart`|yes|The chart to be queried.|
+|`points`|no|The number of points to be returned. Netdata can reduce the number of points by applying query grouping methods. If not given, the result will have the same granularity as the database (although this also depends on `gtime`).|
+|`before`|no|The absolute timestamp or the relative (to now) time the query should finish evaluating data. If not given, it defaults to the timestamp of the latest point in the database.|
+|`after`|no|The absolute timestamp or the relative (to `before`) time the query should start evaluating data. If not given, it defaults to the timestamp of the oldest point in the database.|
+|`group`|no|The grouping method to use when reducing the points the database has. If not given, it defaults to `average`.|
+|`gtime`|no|A resampling period to change the units of the metrics (e.g. setting this to `60` will convert `per second` metrics to `per minute`). If not given, it defaults to the granularity of the database.|
+|`options`|no|A bitmap of options that can affect the operation of the query. Only 2 options are used by the query engine: `unaligned` and `percentage`. All the other options are used by the output formatters. The default is to return aligned data.|
+|`dimensions`|no|A simple pattern to filter the dimensions to be queried. The default is to return all the dimensions of the chart.|
+
+## Operation
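+For example, a data query combining several of these parameters looks like this (using the `system.cpu` chart, as in the example at the end of this page):
+
+<https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-600&points=20&group=average&options=unaligned>
+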
+
+The query engine works as follows (in this order):
+
+#### Time-frame
+
+`after` and `before` define a time-frame, accepting:
+
+- **absolute timestamps** (unix timestamps, i.e. seconds since epoch).
+
+- **relative timestamps**:
+
+ `before` is relative to now and `after` is relative to `before`.
+
+ Example: `before=-60&after=-60` evaluates to the time-frame from -120 up to -60 seconds in
+ the past, relative to the latest entry of the database of the chart.
+
+The engine verifies that the time-frame requested is available at the database:
+
+- If the requested time-frame overlaps with the database, the excess requested
+ will be truncated.
+
+- If the requested time-frame does not overlap with the database, the engine will
+ return an empty data set.
+
+At the end of this operation, `after` and `before` are absolute timestamps.
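+
+To make this concrete, here is a minimal sketch of the resolution step described above. It is illustrative only: the names are invented, and the real engine anchors relative times to the latest entry in the database rather than to the wall clock.
+
+```
+#include <stdio.h>
+#include <time.h>
+
+// illustrative sketch: non-positive values are relative offsets in seconds
+static void resolve_timeframe(time_t now, time_t *after, time_t *before) {
+    if (*before <= 0) *before = now + *before;    // relative to "now"
+    if (*after  <= 0) *after  = *before + *after; // relative to "before"
+}
+
+int main(void) {
+    time_t after = -60, before = -60;
+    resolve_timeframe(time(NULL), &after, &before);
+    // prints a window covering -120 .. -60 seconds relative to now
+    printf("absolute window: %ld .. %ld\n", (long)after, (long)before);
+    return 0;
+}
+```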
+
+#### Data grouping
+
+Database points grouping is applied when the caller requests a time-frame to be
+expressed with fewer points, compared to what is available at the database.
+
+There are 2 use cases that enable this feature:
+
+- The caller requests a specific number of `points` to be returned.
+
+ For example, for a time-frame of 10 minutes, the database has 600 points (1/sec),
+ while the caller requested these 10 minutes to be expressed in 200 points.
+
+ This feature is used by Netdata dashboards when you zoom-out the charts.
+ The dashboard is requesting the number of points the user's screen has.
+ This saves bandwidth and speeds up the browser (fewer points to evaluate for drawing the charts).
+- The caller requests a **re-sampling** of the database, by setting `gtime` to any value
+ above the granularity of the chart.
+
+ For example, the chart's units is `requests/sec` and caller wants `requests/min`.
+
+Using `points` and `gtime` the query engine tries to find a best fit for **database-points**
+vs **result-points** (we call this ratio `group points`). It always tries to keep `group points`
+an integer. Keep in mind the query engine may shift `after` if required. See also the [example](#example).
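+
+In the `points` example above, `group points` would be 600 / 200 = 3: every 3 database points are grouped into 1 result point.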
+
+#### Time-frame Alignment
+
+Alignment is a very important aspect of Netdata queries. Without it, the animated
+charts on the dashboards would constantly [change shape](#example) during incremental updates.
+
+To provide consistent grouping through time, the query engine (by default) aligns
+`after` and `before` to be a multiple of `group points`.
+
+For example, if `group points` is 60 and alignment is enabled, the engine will return
+each point with durations XX:XX:00 - XX:XX:59, matching whole minutes.
+
+To disable alignment, pass `&options=unaligned` to the query.
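+
+A minimal sketch of the alignment idea (illustrative only, with invented names, assuming a granularity of 1 second):
+
+```
+#include <stdio.h>
+#include <time.h>
+
+// snap a window boundary down to a whole multiple of the group duration,
+// so that consecutive queries group the same database points together
+static time_t align_down(time_t t, time_t group_seconds) {
+    return t - (t % group_seconds);
+}
+
+int main(void) {
+    time_t group_seconds = 60; // group points * update_every
+    time_t before = 1000000127;
+    // 1000000127 is snapped down to 1000000080, a whole-minute boundary
+    printf("aligned: %ld\n", (long)align_down(before, group_seconds));
+    return 0;
+}
+```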
+
+#### Query Execution
+
+To execute the query, the engine evaluates all dimensions of the chart, one after another.
+
+The engine does not evaluate dimensions that do not match the [simple pattern](/libnetdata/simple_pattern/README.md)
+given at the `dimensions` parameter, except when `options=percentage` is given (this option
+requires all the dimensions to be evaluated to find the percentage of each dimension versus the
+chart total).
+
+For each dimension, it starts evaluating values starting at `after` (not inclusive) towards
+`before` (inclusive).
+
+For each value it calls the **grouping method** given with the `&group=` query parameter
+(the default is `average`).
+
+## Grouping methods
+
+The following grouping methods are supported. These are given all the values in the time-frame
+and they group the values every `group points`.
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=blue) finds the minimum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow) finds the average value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=sum&units=requests&value_color=orange) adds all the values and returns the sum
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
+
+The examples shown above are live information from the `successful` web requests of the global Netdata registry.
+
+## Further processing
+
+The result of the query engine is always a structure that has dimensions and values
+for each dimension.
+
+Formatting modules are then used to convert this result into many different formats and return it
+to the caller.
+
+## Performance
+
+The query engine is highly optimized for speed. Most of its modules implement "online"
+versions of the algorithms, requiring just one pass on the database values to produce
+the result.
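+For instance, the `average` method (see `average.c` later in this commit) keeps only a running `sum` and `count` while consuming values, so each database point is read exactly once.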
+
+## Example
+
+When Netdata is reducing metrics, it always tries to return the same boundaries. So, if we want 10-second averages, it will always return points starting at a timestamp where `unix timestamp % 10 = 0`.
+
+Let's see why this is needed, by looking at the error case.
+
+Assume we have 5 points:
+
+|time|value|
+|:--:|:---:|
+|00:01|1|
+|00:02|2|
+|00:03|3|
+|00:04|4|
+|00:05|5|
+
+At 00:04 you ask for 2 points for 4 seconds in the past. So `group = 2`. Netdata would return:
+
+|point|time|value|
+|:---:|:--:|:---:|
+|1|00:01 - 00:02|1.5|
+|2|00:03 - 00:04|3.5|
+
+A second later, the chart is refreshed and makes the same request again, now at 00:05. These are the points that would have been returned:
+
+|point|time|value|
+|:---:|:--:|:---:|
+|1|00:02 - 00:03|2.5|
+|2|00:04 - 00:05|4.5|
+
+**Wait a moment!** The chart was shifted by just one point, yet it changed value! Point 2 was 3.5 and, when shifted to point 1, it became 2.5! If you see this in a chart, it's a mess: the charts change shape constantly.
+
+For this reason, Netdata always aligns the data it returns to the `group`.
+
+When you request `points=1`, Netdata understands that you need 1 point for the whole database, so `group = 3600`. Then it tries to find the starting point, which would be `timestamp % 3600 = 0`. Within a database of 3600 seconds, there is one such point for sure. Then it tries to find the average of 3600 points. But most probably it will not find 3600 of them (only for 1 out of every 3600 seconds will this query return something).
+
+So, the proper way to query the database is to also set at least `after`. The following call will return 1 point for the last complete 10-second duration (it starts at `timestamp % 10 = 0`):
+
+<http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds>
+
+When you keep calling this URL, you will see that it returns one new value every 10 seconds, and the timestamp always ends with zero. Similarly, if you say `points=1&after=-5` it will always return timestamps ending with 0 or 5.
+
+
diff --git a/web/api/queries/average/Makefile.am b/web/api/queries/average/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/average/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md
new file mode 100644
index 0000000..b8d4ba7
--- /dev/null
+++ b/web/api/queries/average/README.md
@@ -0,0 +1,46 @@
+<!--
+title: "Average or Mean"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/average/README.md
+-->
+
+# Average or Mean
+
+> This query is available as `average` and `mean`.
+
+An average is a single number taken as representative of a list of numbers.
+
+It is calculated as:
+
+```
+average = sum(numbers) / count(numbers)
+```
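+
+For example, the average of the numbers `2`, `4` and `9` is `(2 + 4 + 9) / 3 = 5`.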
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: average -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`average` does not change the units. For example, if the chart's units are `requests/sec`, the result
+will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=average` in the URL.
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Average>.
+
+
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
new file mode 100644
index 0000000..0719d57
--- /dev/null
+++ b/web/api/queries/average/average.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "average.h"
+
+// ----------------------------------------------------------------------------
+// average
+
+struct grouping_average {
+ NETDATA_DOUBLE sum;
+ size_t count;
+};
+
+void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_average(RRDR *r) {
+ struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+void grouping_free_average(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
+ g->sum += value;
+ g->count++;
+}
+
+NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ if(unlikely(r->internal.resampling_group != 1)) {
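+ // resampling (gtime) is active: apply the precomputed resampling divisor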
+ if (unlikely(r->result_options & RRDR_RESULT_OPTION_VARIABLE_STEP))
+ value = g->sum / g->count / r->internal.resampling_divisor;
+ else
+ value = g->sum / r->internal.resampling_divisor;
+ } else
+ value = g->sum / g->count;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
new file mode 100644
index 0000000..b319668
--- /dev/null
+++ b/web/api/queries/average/average.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_AVERAGE_H
+#define NETDATA_API_QUERY_AVERAGE_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_average(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_average(RRDR *r);
+void grouping_free_average(RRDR *r);
+void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_AVERAGE_H
diff --git a/web/api/queries/countif/Makefile.am b/web/api/queries/countif/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/countif/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/countif/README.md b/web/api/queries/countif/README.md
new file mode 100644
index 0000000..200a4c9
--- /dev/null
+++ b/web/api/queries/countif/README.md
@@ -0,0 +1,36 @@
+<!--
+title: "CountIf"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/countif/README.md
+-->
+
+# CountIf
+
+> This query is available as `countif`.
+
+CountIf returns the percentage of points in the database that satisfy the condition supplied.
+
+The following conditions are available:
+
+- `!` or `!=` or `<>`, not equal to
+- `=` or `:`, equal to
+- `>`, greater than
+- `<`, less than
+- `>=`, greater than or equal to
+- `<=`, less than or equal to
+
+The target number and the desired condition can be set using the `group_options` query parameter, as a string, like in these examples:
+
+- `!0`, to match any number except zero.
+- `>=-3`, to match any number greater than or equal to -3.
+
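+For example, over the five values `1, 2, 3, 4, 5`, the condition `>3` matches two of them, so `countif` returns `40` (the percentage of matching points).
+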
+When an invalid condition is given, the web server may return an inaccurate response.
+
+## how to use
+
+This query cannot be used in alarms.
+
+`countif` changes the units of charts. The result of the calculation is always from 0 to 100, expressing the percentage of database points that matched the condition.
+
+In APIs and badges it can be used like this: `&group=countif&group_options=>10` in the URL.
+
+
diff --git a/web/api/queries/countif/countif.c b/web/api/queries/countif/countif.c
new file mode 100644
index 0000000..369b20b
--- /dev/null
+++ b/web/api/queries/countif/countif.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "countif.h"
+
+// ----------------------------------------------------------------------------
+// countif
+
+struct grouping_countif {
+ size_t (*comparison)(NETDATA_DOUBLE, NETDATA_DOUBLE);
+ NETDATA_DOUBLE target;
+ size_t count;
+ size_t matched;
+};
+
+static size_t countif_equal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v == target);
+}
+
+static size_t countif_notequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v != target);
+}
+
+static size_t countif_less(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v < target);
+}
+
+static size_t countif_lessequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v <= target);
+}
+
+static size_t countif_greater(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v > target);
+}
+
+static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v >= target);
+}
+
+void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
+ r->internal.grouping_data = g;
+
+ if(options && *options) {
+ // skip any leading spaces
+ while(isspace(*options)) options++;
+
+ // find the comparison function
+ switch(*options) {
+ case '!':
+ options++;
+ if(*options != '=' && *options != ':')
+ options--;
+ g->comparison = countif_notequal;
+ break;
+
+ case '>':
+ options++;
+ if(*options == '=' || *options == ':') {
+ g->comparison = countif_greaterequal;
+ }
+ else {
+ options--;
+ g->comparison = countif_greater;
+ }
+ break;
+
+ case '<':
+ options++;
+ if(*options == '>') {
+ g->comparison = countif_notequal;
+ }
+ else if(*options == '=' || *options == ':') {
+ g->comparison = countif_lessequal;
+ }
+ else {
+ options--;
+ g->comparison = countif_less;
+ }
+ break;
+
+ default:
+ case '=':
+ case ':':
+ g->comparison = countif_equal;
+ break;
+ }
+ if(*options) options++;
+
+ // skip any spaces before the target value
+ while(isspace(*options)) options++;
+
+ g->target = str2ndd(options, NULL);
+ }
+ else {
+ g->target = 0.0;
+ g->comparison = countif_equal;
+ }
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_countif(RRDR *r) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+ g->matched = 0;
+ g->count = 0;
+}
+
+void grouping_free_countif(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+ g->matched += g->comparison(value, g->target);
+ g->count++;
+}
+
+NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = (NETDATA_DOUBLE)g->matched * 100 / (NETDATA_DOUBLE)g->count;
+ }
+
+ g->matched = 0;
+ g->count = 0;
+
+ return value;
+}
diff --git a/web/api/queries/countif/countif.h b/web/api/queries/countif/countif.h
new file mode 100644
index 0000000..dfe8056
--- /dev/null
+++ b/web/api/queries/countif/countif.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_COUNTIF_H
+#define NETDATA_API_QUERY_COUNTIF_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_countif(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_countif(RRDR *r);
+void grouping_free_countif(RRDR *r);
+void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_COUNTIF_H
diff --git a/web/api/queries/des/Makefile.am b/web/api/queries/des/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/des/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md
new file mode 100644
index 0000000..33c5f1a
--- /dev/null
+++ b/web/api/queries/des/README.md
@@ -0,0 +1,73 @@
+<!--
+title: "double exponential smoothing"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/des/README.md
+-->
+
+# double exponential smoothing
+
+Exponential smoothing is one of many window functions commonly applied to smooth data in signal
+processing, acting as low-pass filters to remove high frequency noise.
+
+Simple exponential smoothing does not do well when there is a trend in the data.
+In such situations, several methods have been devised under the names "double exponential
+smoothing" or "second-order exponential smoothing": they apply an exponential filter
+twice in succession, hence the term "double exponential smoothing".
+
+In simple terms, this is like an average value, but more recent values are given more weight,
+and the trend of the values significantly influences the result.
+
+> **IMPORTANT**
+>
+> It is common for `des` to provide "average" values that are far beyond the minimum or the
+> maximum values found in the time-series.
+> `des` produces such estimates because it takes the trend into account.
+
+This module implements the "Holt-Winters double exponential smoothing".
+
+Netdata automatically adjusts the weight (`alpha`) and the trend (`beta`) based on the number
+of values processed, using the formula:
+
+```
+window = min(number of values, 15)
+alpha = 2 / (window + 1)
+beta = 2 / (window + 1)
+```
+
+You can change the fixed value `15` in `netdata.conf`:
+
+```
+[web]
+ des max window = 15
+```
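+
+As an illustration (not part of the module), these are the weights the formula gives with
+the defaults, for a query window of at least 15 points:
+
+```
+// illustrative only, assuming the default "des max window = 15"
+// and a query window of at least 15 points:
+double window = 15;
+double alpha  = 2.0 / (window + 1.0);   // = 0.125
+double beta   = 2.0 / (window + 1.0);   // = 0.125
+```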
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: des -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`des` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=des` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=double+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Exponential_smoothing>.
+
+
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
new file mode 100644
index 0000000..a6c4e40
--- /dev/null
+++ b/web/api/queries/des/des.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <web/api/queries/rrdr.h>
+#include "des.h"
+
+
+// ----------------------------------------------------------------------------
+// double exponential smoothing
+
+struct grouping_des {
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE beta;
+ NETDATA_DOUBLE beta_other;
+
+ NETDATA_DOUBLE level;
+ NETDATA_DOUBLE trend;
+
+ size_t count;
+};
+
+static size_t max_window_size = 15;
+
+void grouping_init_des(void) {
+ long long ret = config_get_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
+ if(ret <= 1) {
+ config_set_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
+ }
+ else {
+ max_window_size = (size_t) ret;
+ }
+}
+
+static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_des *g) {
+ (void)g;
+
+ NETDATA_DOUBLE points;
+ if(r->group == 1) {
+ // provide a running DES
+ points = (NETDATA_DOUBLE)r->internal.points_wanted;
+ }
+ else {
+        // provide a DES, flushed at every group of points
+ points = (NETDATA_DOUBLE)r->group;
+ }
+
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+ return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
+}
+
+static inline void set_alpha(RRDR *r, struct grouping_des *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+
+ g->alpha = 2.0 / (window(r, g) + 1.0);
+ g->alpha_other = 1.0 - g->alpha;
+
+ //info("alpha for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->alpha);
+}
+
+static inline void set_beta(RRDR *r, struct grouping_des *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+
+ g->beta = 2.0 / (window(r, g) + 1.0);
+ g->beta_other = 1.0 - g->beta;
+
+ //info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
+}
+
+void grouping_create_des(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_des *g = (struct grouping_des *)onewayalloc_mallocz(r->internal.owa, sizeof(struct grouping_des));
+ set_alpha(r, g);
+ set_beta(r, g);
+ g->level = 0.0;
+ g->trend = 0.0;
+ g->count = 0;
+ r->internal.grouping_data = g;
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_des(RRDR *r) {
+ struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+ g->level = 0.0;
+ g->trend = 0.0;
+ g->count = 0;
+
+ // fprintf(stderr, "\nDES: ");
+
+}
+
+void grouping_free_des(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+
+ if(likely(g->count > 0)) {
+ // we have at least a number so far
+
+ if(unlikely(g->count == 1)) {
+ // the second value we got
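+            // g->trend still holds the first value here, so the next line computes (second - first)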
+ g->trend = value - g->trend;
+ g->level = value;
+ }
+
+ // for the values, except the first
+ NETDATA_DOUBLE last_level = g->level;
+ g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
+ g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
+ }
+ else {
+ // the first value we got
+ g->level = g->trend = value;
+ }
+
+ g->count++;
+
+ //fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT ", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend);
+}
+
+NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
+
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ return 0.0;
+ }
+
+ //fprintf(stderr, " RESULT for %zu values = " CALCULATED_NUMBER_FORMAT " \n", g->count, g->level);
+
+ return g->level;
+}
diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h
new file mode 100644
index 0000000..05fa01b
--- /dev/null
+++ b/web/api/queries/des/des.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_DES_H
+#define NETDATA_API_QUERIES_DES_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_init_des(void);
+
+void grouping_create_des(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_des(RRDR *r);
+void grouping_free_des(RRDR *r);
+void grouping_add_des(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_DES_H
diff --git a/web/api/queries/incremental_sum/Makefile.am b/web/api/queries/incremental_sum/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/incremental_sum/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md
new file mode 100644
index 0000000..4430117
--- /dev/null
+++ b/web/api/queries/incremental_sum/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "Incremental Sum (`incremental_sum`)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/incremental_sum/README.md
+-->
+
+# Incremental Sum (`incremental_sum`)
+
+This module finds the incremental sum of a period, which is `last value - first value`.
+
+The result may be positive (rising) or negative (falling) depending on the first and last values.
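+
+For example (an illustrative walkthrough, not part of the module):
+
+```
+// values collected within one period, in order of arrival:
+// 10, 12, 9, 15  ->  last - first = 15 - 10 =  5  (rising)
+// 15, 12, 9, 10  ->  last - first = 10 - 15 = -5  (falling)
+```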
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: incremental_sum -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`incremental_sum` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=incremental_sum` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental+sum&value_color=orange)
+
+## References
+
+- none
+
+
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c
new file mode 100644
index 0000000..afca530
--- /dev/null
+++ b/web/api/queries/incremental_sum/incremental_sum.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "incremental_sum.h"
+
+// ----------------------------------------------------------------------------
+// incremental sum
+
+struct grouping_incremental_sum {
+ NETDATA_DOUBLE first;
+ NETDATA_DOUBLE last;
+ size_t count;
+};
+
+void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_incremental_sum(RRDR *r) {
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
+ g->first = 0;
+ g->last = 0;
+ g->count = 0;
+}
+
+void grouping_free_incremental_sum(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
+
+ if(unlikely(!g->count)) {
+ g->first = value;
+ g->count++;
+ }
+ else {
+ g->last = value;
+ g->count++;
+ }
+}
+
+NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(unlikely(g->count == 1)) {
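+        // a single sample has no increment to report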
+ value = 0.0;
+ }
+ else {
+ value = g->last - g->first;
+ }
+
+ g->first = 0.0;
+ g->last = 0.0;
+ g->count = 0;
+
+ return value;
+}
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h
new file mode 100644
index 0000000..c24507f
--- /dev/null
+++ b/web/api/queries/incremental_sum/incremental_sum.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_INCREMENTAL_SUM_H
+#define NETDATA_API_QUERY_INCREMENTAL_SUM_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_incremental_sum(RRDR *r);
+void grouping_free_incremental_sum(RRDR *r);
+void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
diff --git a/web/api/queries/max/Makefile.am b/web/api/queries/max/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/max/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md
new file mode 100644
index 0000000..48da7cf
--- /dev/null
+++ b/web/api/queries/max/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Max"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/max/README.md
+-->
+
+# Max
+
+This module finds the max value in the time-frame given.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: max -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`max` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=max` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=orange)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
+
+
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c
new file mode 100644
index 0000000..73cf9fa
--- /dev/null
+++ b/web/api/queries/max/max.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "max.h"
+
+// ----------------------------------------------------------------------------
+// max
+
+struct grouping_max {
+ NETDATA_DOUBLE max;
+ size_t count;
+};
+
+void grouping_create_max(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_max(RRDR *r) {
+ struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
+ g->max = 0;
+ g->count = 0;
+}
+
+void grouping_free_max(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
+
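+    // keep the value with the largest absolute magnitude,
+    // so charts with negative values report their extremes too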
+ if(!g->count || fabsndd(value) > fabsndd(g->max)) {
+ g->max = value;
+ g->count++;
+ }
+}
+
+NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->max;
+ }
+
+ g->max = 0.0;
+ g->count = 0;
+
+ return value;
+}
+
diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h
new file mode 100644
index 0000000..e2427d2
--- /dev/null
+++ b/web/api/queries/max/max.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_MAX_H
+#define NETDATA_API_QUERY_MAX_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_max(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_max(RRDR *r);
+void grouping_free_max(RRDR *r);
+void grouping_add_max(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_MAX_H
diff --git a/web/api/queries/median/Makefile.am b/web/api/queries/median/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/median/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
new file mode 100644
index 0000000..5600284
--- /dev/null
+++ b/web/api/queries/median/README.md
@@ -0,0 +1,60 @@
+<!--
+title: "Median"
+description: "Use median in API queries and health entities to find the 'middle' value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/median/README.md
+-->
+
+# Median
+
+The median is the value separating the higher half from the lower half of a data sample
+(a population or a probability distribution). For a data set, it may be thought of as the
+"middle" value.
+
+`median` is not an accurate average. However, it eliminates all spikes by sorting
+all the values in a period and selecting the value in the middle of the sorted array.
+
+Netdata also supports `trimmed-median`, which trims a percentage of the smallest and largest
+values prior to finding the median. The following `trimmed-median` functions are defined:
+
+- `trimmed-median1`
+- `trimmed-median2`
+- `trimmed-median3`
+- `trimmed-median5`
+- `trimmed-median10`
+- `trimmed-median15`
+- `trimmed-median20`
+- `trimmed-median25`
+
+The function `trimmed-median` is an alias for `trimmed-median5`.
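+
+As an illustration of the trimming (a sketch of what `grouping_flush_median()` does, which
+trims by value range), consider `trimmed-median20` over the sorted values `1` to `10`:
+
+```
+// delta      = (max - min) * 20% = (10 - 1) * 0.20 = 1.8
+// kept range = [min + delta, max - delta] = [2.8, 8.2]
+// kept       = 3, 4, 5, 6, 7, 8
+// median     = (5 + 6) / 2 = 5.5
+```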
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: median -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`median` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=median` in the URL. Additionally, a percentage may be given with
+`&group_options=` to trim the smallest and largest values before finding the median.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Median>.
+
+
diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c
new file mode 100644
index 0000000..40fd4ec
--- /dev/null
+++ b/web/api/queries/median/median.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "median.h"
+
+// ----------------------------------------------------------------------------
+// median
+
+struct grouping_median {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+void grouping_create_median_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_median *g = (struct grouping_median *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_median));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
+ g->percent = g->percent / 100.0;
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_median(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 0.0);
+}
+void grouping_create_trimmed_median1(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 1.0);
+}
+void grouping_create_trimmed_median2(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 2.0);
+}
+void grouping_create_trimmed_median3(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 3.0);
+}
+void grouping_create_trimmed_median5(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 5.0);
+}
+void grouping_create_trimmed_median10(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 10.0);
+}
+void grouping_create_trimmed_median15(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 15.0);
+}
+void grouping_create_trimmed_median20(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 20.0);
+}
+void grouping_create_trimmed_median25(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 25.0);
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_median(RRDR *r) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_median(RRDR *r) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+
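+    // grow the series buffer by doubling it whenever it fills up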
+ if(unlikely(g->next_pos >= g->series_size)) {
+        g->series = onewayalloc_doublesize(r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+
+ size_t available_slots = g->next_pos;
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ size_t start_slot = 0;
+ size_t end_slot = available_slots - 1;
+
+ if(g->percent > 0.0) {
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+ NETDATA_DOUBLE delta = (max - min) * g->percent;
+
+ NETDATA_DOUBLE wanted_min = min + delta;
+ NETDATA_DOUBLE wanted_max = max - delta;
+
+ for (start_slot = 0; start_slot < available_slots; start_slot++)
+ if (g->series[start_slot] >= wanted_min) break;
+
+ for (end_slot = available_slots - 1; end_slot > start_slot; end_slot--)
+ if (g->series[end_slot] <= wanted_max) break;
+ }
+
+ if(start_slot == end_slot)
+ value = g->series[start_slot];
+ else
+ value = median_on_sorted_series(&g->series[start_slot], end_slot - start_slot + 1);
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "median");
+
+ g->next_pos = 0;
+
+ return value;
+}
diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h
new file mode 100644
index 0000000..9fc159d
--- /dev/null
+++ b/web/api/queries/median/median.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_MEDIAN_H
+#define NETDATA_API_QUERIES_MEDIAN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_median(RRDR *r, const char *options);
+void grouping_create_trimmed_median1(RRDR *r, const char *options);
+void grouping_create_trimmed_median2(RRDR *r, const char *options);
+void grouping_create_trimmed_median3(RRDR *r, const char *options);
+void grouping_create_trimmed_median5(RRDR *r, const char *options);
+void grouping_create_trimmed_median10(RRDR *r, const char *options);
+void grouping_create_trimmed_median15(RRDR *r, const char *options);
+void grouping_create_trimmed_median20(RRDR *r, const char *options);
+void grouping_create_trimmed_median25(RRDR *r, const char *options);
+void grouping_reset_median(RRDR *r);
+void grouping_free_median(RRDR *r);
+void grouping_add_median(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_MEDIAN_H
diff --git a/web/api/queries/min/Makefile.am b/web/api/queries/min/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/min/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md
new file mode 100644
index 0000000..495523c
--- /dev/null
+++ b/web/api/queries/min/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Min"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/min/README.md
+-->
+
+# Min
+
+This module finds the min value in the time-frame given.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: min -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`min` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=min` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
+
+
diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c
new file mode 100644
index 0000000..1752e9e
--- /dev/null
+++ b/web/api/queries/min/min.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "min.h"
+
+// ----------------------------------------------------------------------------
+// min
+
+struct grouping_min {
+ NETDATA_DOUBLE min;
+ size_t count;
+};
+
+void grouping_create_min(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_min(RRDR *r) {
+ struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
+ g->min = 0;
+ g->count = 0;
+}
+
+void grouping_free_min(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
+
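+    // keep the value with the smallest absolute magnitude,
+    // i.e. the sample closest to zero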
+ if(!g->count || fabsndd(value) < fabsndd(g->min)) {
+ g->min = value;
+ g->count++;
+ }
+}
+
+NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->min;
+ }
+
+ g->min = 0.0;
+ g->count = 0;
+
+ return value;
+}
+
diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h
new file mode 100644
index 0000000..dcdfe25
--- /dev/null
+++ b/web/api/queries/min/min.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_MIN_H
+#define NETDATA_API_QUERY_MIN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_min(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_min(RRDR *r);
+void grouping_free_min(RRDR *r);
+void grouping_add_min(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_MIN_H
diff --git a/web/api/queries/percentile/Makefile.am b/web/api/queries/percentile/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/percentile/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/percentile/README.md b/web/api/queries/percentile/README.md
new file mode 100644
index 0000000..70afc74
--- /dev/null
+++ b/web/api/queries/percentile/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Percentile"
+description: "Use percentile in API queries and health entities to find the 'percentile' value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/percentile/README.md
+-->
+
+# Percentile
+
+The percentile is the average value of a series, computed using only the smallest N percent
+of the values in the sample.
+
+Netdata applies linear interpolation on the last point, if the requested percentile does not
+correspond to a whole number of points.
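+
+For example (an illustrative walkthrough of the calculation):
+
+```
+// percentile50 over the sorted values 1, 2, 3, 4:
+// slots to use = 4 * 0.50 = 2   (a whole number, so no interpolation)
+// result       = (1 + 2) / 2 = 1.5
+```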
+
+The following percentile aliases are defined:
+
+- `percentile25`
+- `percentile50`
+- `percentile75`
+- `percentile80`
+- `percentile90`
+- `percentile95`
+- `percentile97`
+- `percentile98`
+- `percentile99`
+
+The default `percentile` is an alias for `percentile95`.
+Any percentile may be requested using the `group_options` query parameter.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: percentile95 -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`percentile` does not change the units. For example, if the chart's units are `requests/sec`, the
+result will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=percentile` in the URL and the additional parameter `group_options`
+may be used to request any percentile (e.g. `&group=percentile&group_options=96`).
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=percentile95&after=-60&label=percentile95&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Percentile>.
diff --git a/web/api/queries/percentile/percentile.c b/web/api/queries/percentile/percentile.c
new file mode 100644
index 0000000..88f8600
--- /dev/null
+++ b/web/api/queries/percentile/percentile.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "percentile.h"
+
+// ----------------------------------------------------------------------------
+// percentile
+
+struct grouping_percentile {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static void grouping_create_percentile_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_percentile *g = (struct grouping_percentile *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_percentile));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 100.0) g->percent = 100.0;
+ }
+
+ g->percent = g->percent / 100.0;
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_percentile25(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 25.0);
+}
+void grouping_create_percentile50(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 50.0);
+}
+void grouping_create_percentile75(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 75.0);
+}
+void grouping_create_percentile80(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 80.0);
+}
+void grouping_create_percentile90(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 90.0);
+}
+void grouping_create_percentile95(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 95.0);
+}
+void grouping_create_percentile97(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 97.0);
+}
+void grouping_create_percentile98(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 98.0);
+}
+void grouping_create_percentile99(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 99.0);
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+void grouping_reset_percentile(RRDR *r) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_percentile(RRDR *r) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+
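+    // grow the series buffer by doubling it whenever it fills up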
+ if(unlikely(g->next_pos >= g->series_size)) {
+        g->series = onewayalloc_doublesize(r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = 0;
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1;
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "percentile");
+
+ g->next_pos = 0;
+
+ return value;
+}
diff --git a/web/api/queries/percentile/percentile.h b/web/api/queries/percentile/percentile.h
new file mode 100644
index 0000000..65e335c
--- /dev/null
+++ b/web/api/queries/percentile/percentile.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_PERCENTILE_H
+#define NETDATA_API_QUERIES_PERCENTILE_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_percentile25(RRDR *r, const char *options);
+void grouping_create_percentile50(RRDR *r, const char *options);
+void grouping_create_percentile75(RRDR *r, const char *options);
+void grouping_create_percentile80(RRDR *r, const char *options);
+void grouping_create_percentile90(RRDR *r, const char *options);
+void grouping_create_percentile95(RRDR *r, const char *options);
+void grouping_create_percentile97(RRDR *r, const char *options);
+void grouping_create_percentile98(RRDR *r, const char *options);
+void grouping_create_percentile99(RRDR *r, const char *options);
+void grouping_reset_percentile(RRDR *r);
+void grouping_free_percentile(RRDR *r);
+void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_PERCENTILE_H
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
new file mode 100644
index 0000000..0365b6e
--- /dev/null
+++ b/web/api/queries/query.c
@@ -0,0 +1,2175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "query.h"
+#include "web/api/formatters/rrd2json.h"
+#include "rrdr.h"
+
+#include "average/average.h"
+#include "countif/countif.h"
+#include "incremental_sum/incremental_sum.h"
+#include "max/max.h"
+#include "median/median.h"
+#include "min/min.h"
+#include "sum/sum.h"
+#include "stddev/stddev.h"
+#include "ses/ses.h"
+#include "des/des.h"
+#include "percentile/percentile.h"
+#include "trimmed_mean/trimmed_mean.h"
+
+// ----------------------------------------------------------------------------
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ RRDR_GROUPING value;
+
+    // One-time initialization for the module.
+ // This is called once, when netdata starts.
+ void (*init)(void);
+
+ // Allocate all required structures for a query.
+ // This is called once for each netdata query.
+ void (*create)(struct rrdresult *r, const char *options);
+
+ // Cleanup collected values, but don't destroy the structures.
+ // This is called when the query engine switches dimensions,
+ // as part of the same query (so same chart, switching metric).
+ void (*reset)(struct rrdresult *r);
+
+ // Free all resources allocated for the query.
+ void (*free)(struct rrdresult *r);
+
+ // Add a single value into the calculation.
+    // The module may decide to cache it, or use it on the fly.
+ void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
+
+ // Generate a single result for the values added so far.
+ // More values and points may be requested later.
+ // It is up to the module to reset its internal structures
+ // when flushing it (so for a few modules it may be better to
+ // continue after a flush as if nothing changed, for others a
+ // cleanup of the internal structures may be required).
+ NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+ TIER_QUERY_FETCH tier_query_fetch;
+} api_v1_data_groups[] = {
+ {.name = "average",
+ .hash = 0,
+ .value = RRDR_GROUPING_AVERAGE,
+ .init = NULL,
+ .create= grouping_create_average,
+ .reset = grouping_reset_average,
+ .free = grouping_free_average,
+ .add = grouping_add_average,
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "mean", // alias on 'average'
+ .hash = 0,
+ .value = RRDR_GROUPING_AVERAGE,
+ .init = NULL,
+ .create= grouping_create_average,
+ .reset = grouping_reset_average,
+ .free = grouping_free_average,
+ .add = grouping_add_average,
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean1",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN1,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean1,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean2",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN2,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean2,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean3",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN3,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean3,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean5",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean5,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean10",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN10,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean10,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean15",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN15,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean15,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean20",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN20,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean20,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean25",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN25,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean25,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean5,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "incremental_sum",
+ .hash = 0,
+ .value = RRDR_GROUPING_INCREMENTAL_SUM,
+ .init = NULL,
+ .create= grouping_create_incremental_sum,
+ .reset = grouping_reset_incremental_sum,
+ .free = grouping_free_incremental_sum,
+ .add = grouping_add_incremental_sum,
+ .flush = grouping_flush_incremental_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "incremental-sum",
+ .hash = 0,
+ .value = RRDR_GROUPING_INCREMENTAL_SUM,
+ .init = NULL,
+ .create= grouping_create_incremental_sum,
+ .reset = grouping_reset_incremental_sum,
+ .free = grouping_free_incremental_sum,
+ .add = grouping_add_incremental_sum,
+ .flush = grouping_flush_incremental_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "median",
+ .hash = 0,
+ .value = RRDR_GROUPING_MEDIAN,
+ .init = NULL,
+ .create= grouping_create_median,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median1",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN1,
+ .init = NULL,
+ .create= grouping_create_trimmed_median1,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median2",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN2,
+ .init = NULL,
+ .create= grouping_create_trimmed_median2,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median3",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN3,
+ .init = NULL,
+ .create= grouping_create_trimmed_median3,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median5",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_median5,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median10",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN10,
+ .init = NULL,
+ .create= grouping_create_trimmed_median10,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median15",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN15,
+ .init = NULL,
+ .create= grouping_create_trimmed_median15,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median20",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN20,
+ .init = NULL,
+ .create= grouping_create_trimmed_median20,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median25",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN25,
+ .init = NULL,
+ .create= grouping_create_trimmed_median25,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_median5,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile25",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE25,
+ .init = NULL,
+ .create= grouping_create_percentile25,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile50",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE50,
+ .init = NULL,
+ .create= grouping_create_percentile50,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile75",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE75,
+ .init = NULL,
+ .create= grouping_create_percentile75,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile80",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE80,
+ .init = NULL,
+ .create= grouping_create_percentile80,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile90",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE90,
+ .init = NULL,
+ .create= grouping_create_percentile90,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile95",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE95,
+ .init = NULL,
+ .create= grouping_create_percentile95,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile97",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE97,
+ .init = NULL,
+ .create= grouping_create_percentile97,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile98",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE98,
+ .init = NULL,
+ .create= grouping_create_percentile98,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile99",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE99,
+ .init = NULL,
+ .create= grouping_create_percentile99,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE95,
+ .init = NULL,
+ .create= grouping_create_percentile95,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "min",
+ .hash = 0,
+ .value = RRDR_GROUPING_MIN,
+ .init = NULL,
+ .create= grouping_create_min,
+ .reset = grouping_reset_min,
+ .free = grouping_free_min,
+ .add = grouping_add_min,
+ .flush = grouping_flush_min,
+ .tier_query_fetch = TIER_QUERY_FETCH_MIN
+ },
+ {.name = "max",
+ .hash = 0,
+ .value = RRDR_GROUPING_MAX,
+ .init = NULL,
+ .create= grouping_create_max,
+ .reset = grouping_reset_max,
+ .free = grouping_free_max,
+ .add = grouping_add_max,
+ .flush = grouping_flush_max,
+ .tier_query_fetch = TIER_QUERY_FETCH_MAX
+ },
+ {.name = "sum",
+ .hash = 0,
+ .value = RRDR_GROUPING_SUM,
+ .init = NULL,
+ .create= grouping_create_sum,
+ .reset = grouping_reset_sum,
+ .free = grouping_free_sum,
+ .add = grouping_add_sum,
+ .flush = grouping_flush_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_SUM
+ },
+
+ // standard deviation
+ {.name = "stddev",
+ .hash = 0,
+ .value = RRDR_GROUPING_STDDEV,
+ .init = NULL,
+ .create= grouping_create_stddev,
+ .reset = grouping_reset_stddev,
+ .free = grouping_free_stddev,
+ .add = grouping_add_stddev,
+ .flush = grouping_flush_stddev,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "cv", // coefficient variation is calculated by stddev
+ .hash = 0,
+ .value = RRDR_GROUPING_CV,
+ .init = NULL,
+ .create= grouping_create_stddev, // not an error, stddev calculates this too
+ .reset = grouping_reset_stddev, // not an error, stddev calculates this too
+ .free = grouping_free_stddev, // not an error, stddev calculates this too
+ .add = grouping_add_stddev, // not an error, stddev calculates this too
+ .flush = grouping_flush_coefficient_of_variation,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "rsd", // alias of 'cv'
+ .hash = 0,
+ .value = RRDR_GROUPING_CV,
+ .init = NULL,
+ .create= grouping_create_stddev, // not an error, stddev calculates this too
+ .reset = grouping_reset_stddev, // not an error, stddev calculates this too
+ .free = grouping_free_stddev, // not an error, stddev calculates this too
+ .add = grouping_add_stddev, // not an error, stddev calculates this too
+ .flush = grouping_flush_coefficient_of_variation,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+
+ /*
+ {.name = "mean", // same as average, no need to define it again
+ .hash = 0,
+ .value = RRDR_GROUPING_MEAN,
+ .setup = NULL,
+ .create= grouping_create_stddev,
+ .reset = grouping_reset_stddev,
+ .free = grouping_free_stddev,
+ .add = grouping_add_stddev,
+ .flush = grouping_flush_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ */
+
+ /*
+ {.name = "variance", // meaningless to offer
+ .hash = 0,
+ .value = RRDR_GROUPING_VARIANCE,
+ .setup = NULL,
+ .create= grouping_create_stddev,
+ .reset = grouping_reset_stddev,
+ .free = grouping_free_stddev,
+ .add = grouping_add_stddev,
+ .flush = grouping_flush_variance,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ */
+
+ // single exponential smoothing
+ {.name = "ses",
+ .hash = 0,
+ .value = RRDR_GROUPING_SES,
+ .init = grouping_init_ses,
+ .create= grouping_create_ses,
+ .reset = grouping_reset_ses,
+ .free = grouping_free_ses,
+ .add = grouping_add_ses,
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "ema", // alias for 'ses'
+ .hash = 0,
+ .value = RRDR_GROUPING_SES,
+ .init = NULL,
+ .create= grouping_create_ses,
+ .reset = grouping_reset_ses,
+ .free = grouping_free_ses,
+ .add = grouping_add_ses,
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "ewma", // alias for ses
+ .hash = 0,
+ .value = RRDR_GROUPING_SES,
+ .init = NULL,
+ .create= grouping_create_ses,
+ .reset = grouping_reset_ses,
+ .free = grouping_free_ses,
+ .add = grouping_add_ses,
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+
+ // double exponential smoothing
+ {.name = "des",
+ .hash = 0,
+ .value = RRDR_GROUPING_DES,
+ .init = grouping_init_des,
+ .create= grouping_create_des,
+ .reset = grouping_reset_des,
+ .free = grouping_free_des,
+ .add = grouping_add_des,
+ .flush = grouping_flush_des,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+
+ {.name = "countif",
+ .hash = 0,
+ .value = RRDR_GROUPING_COUNTIF,
+ .init = NULL,
+ .create= grouping_create_countif,
+ .reset = grouping_reset_countif,
+ .free = grouping_free_countif,
+ .add = grouping_add_countif,
+ .flush = grouping_flush_countif,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+
+ // terminator
+ {.name = NULL,
+ .hash = 0,
+ .value = RRDR_GROUPING_UNDEFINED,
+ .init = NULL,
+ .create= grouping_create_average,
+ .reset = grouping_reset_average,
+ .free = grouping_free_average,
+ .add = grouping_add_average,
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ }
+};
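+
+// To register a new grouping method, add an entry before the terminator above
+// and provide the create/reset/free/add/flush callbacks. A hypothetical "last"
+// method (keeping only the most recent point of each group) would look like
+// this -- sketch only, the grouping_*_last callbacks and the RRDR_GROUPING_LAST
+// enum member do not exist in this tree:
+//
+//    {.name = "last",
+//     .hash = 0, // filled by web_client_api_v1_init_grouping()
+//     .value = RRDR_GROUPING_LAST,
+//     .init = NULL,
+//     .create= grouping_create_last,
+//     .reset = grouping_reset_last,
+//     .free = grouping_free_last,
+//     .add = grouping_add_last,
+//     .flush = grouping_flush_last,
+//     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+//    },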
+
+void web_client_api_v1_init_grouping(void) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++) {
+ api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
+
+ if(api_v1_data_groups[i].init)
+ api_v1_data_groups[i].init();
+ }
+}
+
+const char *group_method2string(RRDR_GROUPING group) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++) {
+ if(api_v1_data_groups[i].value == group) {
+ return api_v1_data_groups[i].name;
+ }
+ }
+
+ return "unknown-group-method";
+}
+
+RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
+ int i;
+
+ uint32_t hash = simple_hash(name);
+ for(i = 0; api_v1_data_groups[i].name ; i++)
+ if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
+ return api_v1_data_groups[i].value;
+
+ return def;
+}
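+
+// Typical use: resolving the "group" URL parameter of /api/v1/data. For
+// example (sketch):
+//    RRDR_GROUPING gm = web_client_api_request_v1_data_group("median", RRDR_GROUPING_AVERAGE);
+// returns RRDR_GROUPING_MEDIAN, while an unrecognized name returns the
+// supplied default.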
+
+const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++)
+ if(unlikely(group == api_v1_data_groups[i].value))
+ return api_v1_data_groups[i].name;
+
+ return "unknown";
+}
+
+static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
+ int i, found = 0;
+ for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
+ if(api_v1_data_groups[i].value == group_method) {
+ r->internal.grouping_create = api_v1_data_groups[i].create;
+ r->internal.grouping_reset = api_v1_data_groups[i].reset;
+ r->internal.grouping_free = api_v1_data_groups[i].free;
+ r->internal.grouping_add = api_v1_data_groups[i].add;
+ r->internal.grouping_flush = api_v1_data_groups[i].flush;
+ r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
+ found = 1;
+ }
+ }
+ if(!found) {
+ errno = 0;
+ internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
+ r->internal.grouping_create = grouping_create_average;
+ r->internal.grouping_reset = grouping_reset_average;
+ r->internal.grouping_free = grouping_free_average;
+ r->internal.grouping_add = grouping_add_average;
+ r->internal.grouping_flush = grouping_flush_average;
+ r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
+ }
+}
+
+// ----------------------------------------------------------------------------
+// helpers to find our way in RRDR
+
+static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long rrdr_line) {
+ return &r->o[ rrdr_line * r->d ];
+}
+
+static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
+ return &r->v[ rrdr_line * r->d ];
+}
+
+static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
+ rrdr_line++;
+
+ internal_error(rrdr_line >= (long)r->n,
+ "QUERY: requested to step above RRDR size for query '%s'",
+ r->internal.qt->id);
+
+ internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
+ "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of query '%s'",
+ (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->internal.qt->id);
+
+ // save the time
+ r->t[rrdr_line] = t;
+
+ return rrdr_line;
+}
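+
+// Usage note: callers seed rrdr_line with -1, so the first call returns line 0
+// and stamps r->t[0] = t, e.g.: rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);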
+
+static inline void rrdr_done(RRDR *r, long rrdr_line) {
+ r->rows = rrdr_line + 1;
+}
+
+
+// ----------------------------------------------------------------------------
+// tier management
+
+static bool query_metric_is_valid_tier(QUERY_METRIC *qm, size_t tier) {
+ if(!qm->tiers[tier].db_metric_handle || !qm->tiers[tier].db_first_time_t || !qm->tiers[tier].db_last_time_t || !qm->tiers[tier].db_update_every)
+ return false;
+
+ return true;
+}
+
+static size_t query_metric_first_working_tier(QUERY_METRIC *qm) {
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
+
+ // find the db time-range for this tier for all metrics
+ STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
+ time_t first_t = qm->tiers[tier].db_first_time_t;
+ time_t last_t = qm->tiers[tier].db_last_time_t;
+ time_t update_every = qm->tiers[tier].db_update_every;
+
+ if(!db_metric_handle || !first_t || !last_t || !update_every)
+ continue;
+
+ return tier;
+ }
+
+ return 0;
+}
+
+static long query_plan_points_coverage_weight(time_t db_first_t, time_t db_last_t, time_t db_update_every, time_t after_wanted, time_t before_wanted, size_t points_wanted, size_t tier __maybe_unused) {
+ if(db_first_t == 0 || db_last_t == 0 || db_update_every == 0)
+ return -LONG_MAX;
+
+ time_t common_first_t = MAX(db_first_t, after_wanted);
+ time_t common_last_t = MIN(db_last_t, before_wanted);
+
+ long time_coverage = (common_last_t - common_first_t) * 1000000 / (before_wanted - after_wanted);
+ size_t points_wanted_in_coverage = points_wanted * time_coverage / 1000000;
+
+ long points_available = (common_last_t - common_first_t) / db_update_every;
+ long points_delta = (long)(points_available - points_wanted_in_coverage);
+ long points_coverage = (points_delta < 0) ? (long)(points_available * time_coverage / points_wanted_in_coverage) : time_coverage;
+
+ // a way to benefit higher tiers
+ // points_coverage += (long)tier * 10000;
+
+ if(points_available <= 0)
+ return -LONG_MAX;
+
+ return points_coverage;
+}
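+
+// Worked example (hypothetical numbers) for a window after=0, before=1000,
+// points_wanted=100:
+//  - a tier holding [500, 1000] at update_every=5: time_coverage = 500000,
+//    points_wanted_in_coverage = 50, points_available = 100, delta >= 0,
+//    so its weight is the time_coverage, 500000.
+//  - a tier holding [0, 1000] at update_every=50: time_coverage = 1000000,
+//    points_wanted_in_coverage = 100, points_available = 20, delta < 0,
+//    so its weight is 20 * 1000000 / 100 = 200000.
+// The half-range, high-resolution tier wins, because it can deliver the
+// requested resolution where it actually has data.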
+
+static size_t query_metric_best_tier_for_timeframe(QUERY_METRIC *qm, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
+ if(unlikely(storage_tiers < 2))
+ return 0;
+
+ if(unlikely(after_wanted == before_wanted || points_wanted <= 0))
+ return query_metric_first_working_tier(qm);
+
+ long weight[storage_tiers];
+
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
+
+ // find the db time-range for this tier for all metrics
+ STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
+ time_t first_t = qm->tiers[tier].db_first_time_t;
+ time_t last_t = qm->tiers[tier].db_last_time_t;
+ time_t update_every = qm->tiers[tier].db_update_every;
+
+ if(!db_metric_handle || !first_t || !last_t || !update_every) {
+ weight[tier] = -LONG_MAX;
+ continue;
+ }
+
+ weight[tier] = query_plan_points_coverage_weight(first_t, last_t, update_every, after_wanted, before_wanted, points_wanted, tier);
+ }
+
+ size_t best_tier = 0;
+ for(size_t tier = 1; tier < storage_tiers ; tier++) {
+ if(weight[tier] >= weight[best_tier])
+ best_tier = tier;
+ }
+
+ return best_tier;
+}
+
+static size_t rrddim_find_best_tier_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
+ if(unlikely(storage_tiers < 2))
+ return 0;
+
+ if(unlikely(after_wanted == before_wanted || points_wanted <= 0)) {
+ internal_error(true, "QUERY: '%s' has invalid params to tier calculation", qt->id);
+ return 0;
+ }
+
+ long weight[storage_tiers];
+
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
+
+ time_t common_first_t = 0;
+ time_t common_last_t = 0;
+ time_t common_update_every = 0;
+
+ // find the db time-range for this tier for all metrics
+ for(size_t i = 0, used = qt->query.used; i < used ; i++) {
+ QUERY_METRIC *qm = &qt->query.array[i];
+
+ time_t first_t = qm->tiers[tier].db_first_time_t;
+ time_t last_t = qm->tiers[tier].db_last_time_t;
+ time_t update_every = qm->tiers[tier].db_update_every;
+
+ if(!first_t || !last_t || !update_every)
+ continue;
+
+ if(!common_first_t)
+ common_first_t = first_t;
+ else
+ common_first_t = MIN(first_t, common_first_t);
+
+ if(!common_last_t)
+ common_last_t = last_t;
+ else
+ common_last_t = MAX(last_t, common_last_t);
+
+ if(!common_update_every)
+ common_update_every = update_every;
+ else
+ common_update_every = MIN(update_every, common_update_every);
+ }
+
+ weight[tier] = query_plan_points_coverage_weight(common_first_t, common_last_t, common_update_every, after_wanted, before_wanted, points_wanted, tier);
+ }
+
+ size_t best_tier = 0;
+ for(size_t tier = 1; tier < storage_tiers ; tier++) {
+ if(weight[tier] >= weight[best_tier])
+ best_tier = tier;
+ }
+
+ if(weight[best_tier] == -LONG_MAX)
+ best_tier = 0;
+
+ return best_tier;
+}
+
+static time_t rrdset_find_natural_update_every_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted, RRDR_OPTIONS options, size_t tier) {
+ size_t best_tier;
+ if((options & RRDR_OPTION_SELECTED_TIER) && tier < storage_tiers)
+ best_tier = tier;
+ else
+ best_tier = rrddim_find_best_tier_for_timeframe(qt, after_wanted, before_wanted, points_wanted);
+
+ // find the db minimum update every for this tier for all metrics
+ time_t common_update_every = default_rrd_update_every;
+ for(size_t i = 0, used = qt->query.used; i < used ; i++) {
+ QUERY_METRIC *qm = &qt->query.array[i];
+
+ time_t update_every = qm->tiers[best_tier].db_update_every;
+
+ if(!i)
+ common_update_every = update_every;
+ else
+ common_update_every = MIN(update_every, common_update_every);
+ }
+
+ return common_update_every;
+}
+
+// ----------------------------------------------------------------------------
+// query ops
+
+typedef struct query_point {
+ time_t end_time;
+ time_t start_time;
+ NETDATA_DOUBLE value;
+ NETDATA_DOUBLE anomaly;
+ SN_FLAGS flags;
+#ifdef NETDATA_INTERNAL_CHECKS
+ size_t id;
+#endif
+} QUERY_POINT;
+
+QUERY_POINT QUERY_POINT_EMPTY = {
+ .end_time = 0,
+ .start_time = 0,
+ .value = NAN,
+ .anomaly = 0,
+ .flags = SN_FLAG_NONE,
+#ifdef NETDATA_INTERNAL_CHECKS
+ .id = 0,
+#endif
+};
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define query_point_set_id(point, point_id) (point).id = point_id
+#else
+#define query_point_set_id(point, point_id) debug_dummy()
+#endif
+
+typedef struct query_plan_entry {
+ size_t tier;
+ time_t after;
+ time_t before;
+} QUERY_PLAN_ENTRY;
+
+typedef struct query_plan {
+ size_t entries;
+ QUERY_PLAN_ENTRY data[RRD_STORAGE_TIERS*2];
+} QUERY_PLAN;
+
+typedef struct query_engine_ops {
+ // configuration
+ RRDR *r;
+ QUERY_METRIC *qm;
+ time_t view_update_every;
+ time_t query_granularity;
+ TIER_QUERY_FETCH tier_query_fetch;
+
+ // query planner
+ QUERY_PLAN plan;
+ size_t current_plan;
+ time_t current_plan_expire_time;
+
+ // storage queries
+ size_t tier;
+ struct query_metric_tier *tier_ptr;
+ struct storage_engine_query_handle handle;
+ STORAGE_POINT (*next_metric)(struct storage_engine_query_handle *handle);
+ int (*is_finished)(struct storage_engine_query_handle *handle);
+ void (*finalize)(struct storage_engine_query_handle *handle);
+
+ // aggregating points over time
+ void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ size_t group_points_non_zero;
+ size_t group_points_added;
+ NETDATA_DOUBLE group_anomaly_rate;
+ RRDR_VALUE_FLAGS group_value_flags;
+
+ // statistics
+ size_t db_total_points_read;
+ size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
+} QUERY_ENGINE_OPS;
+
+
+// ----------------------------------------------------------------------------
+// query planner
+
+#define query_plan_should_switch_plan(ops, now) ((now) >= (ops).current_plan_expire_time)
+
+static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after) {
+ if(unlikely(plan_id >= ops->plan.entries))
+ plan_id = ops->plan.entries - 1;
+
+ time_t after = ops->plan.data[plan_id].after;
+ time_t before = ops->plan.data[plan_id].before;
+
+ if(overwrite_after > after && overwrite_after < before)
+ after = overwrite_after;
+
+ ops->tier = ops->plan.data[plan_id].tier;
+ ops->tier_ptr = &ops->qm->tiers[ops->tier];
+ ops->tier_ptr->eng->api.query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before);
+ ops->next_metric = ops->tier_ptr->eng->api.query_ops.next_metric;
+ ops->is_finished = ops->tier_ptr->eng->api.query_ops.is_finished;
+ ops->finalize = ops->tier_ptr->eng->api.query_ops.finalize;
+ ops->current_plan = plan_id;
+ ops->current_plan_expire_time = ops->plan.data[plan_id].before;
+}
+
+static void query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
+ internal_error(now < ops->current_plan_expire_time && now < ops->plan.data[ops->current_plan].before,
+ "QUERY: switching query plan too early!");
+
+ size_t old_plan = ops->current_plan;
+
+ time_t next_plan_before_time;
+ do {
+ ops->current_plan++;
+
+ if (ops->current_plan >= ops->plan.entries) {
+ ops->current_plan = old_plan;
+ ops->current_plan_expire_time = ops->r->internal.qt->window.before;
+ // let the query run with current plan
+ // we will not switch it
+ return;
+ }
+
+ next_plan_before_time = ops->plan.data[ops->current_plan].before;
+ } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);
+
+ if(!query_metric_is_valid_tier(ops->qm, ops->plan.data[ops->current_plan].tier)) {
+ ops->current_plan = old_plan;
+ ops->current_plan_expire_time = ops->r->internal.qt->window.before;
+ return;
+ }
+
+ if(ops->finalize) {
+ ops->finalize(&ops->handle);
+ ops->finalize = NULL;
+ ops->is_finished = NULL;
+ }
+
+ // internal_error(true, "QUERY: switched plan to %zu (all is %zu), previous expiration was %ld, this starts at %ld, now is %ld, last_point_end_time %ld", ops->current_plan, ops->plan.entries, ops->plan.data[ops->current_plan-1].before, ops->plan.data[ops->current_plan].after, now, last_point_end_time);
+
+ query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
+}
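+
+// Example (hypothetical): with a multi-step plan like the one sketched after
+// query_plan() below, the main query loop starts on plan[0]; once 'now' crosses
+// plan[0].before, the old storage handle is finalized and plan[1] is activated,
+// resuming from MIN(now, last_point_end_time) so no range is read twice.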
+
+static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
+ QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
+ QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
+ return (p1->after < p2->after)?-1:1;
+}
+
+static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
+ //BUFFER *wb = buffer_create(1000);
+ //buffer_sprintf(wb, "QUERY PLAN for chart '%s' dimension '%s', from %ld to %ld:", rd->rrdset->name, rd->name, after_wanted, before_wanted);
+
+ // put our selected tier as the first plan
+ size_t selected_tier;
+
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER
+ && ops->r->internal.qt->window.tier < storage_tiers
+ && query_metric_is_valid_tier(ops->qm, ops->r->internal.qt->window.tier)) {
+ selected_tier = ops->r->internal.qt->window.tier;
+ }
+ else {
+ selected_tier = query_metric_best_tier_for_timeframe(ops->qm, after_wanted, before_wanted, points_wanted);
+
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
+ ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
+ }
+
+ ops->plan.entries = 1;
+ ops->plan.data[0].tier = selected_tier;
+ ops->plan.data[0].after = ops->qm->tiers[selected_tier].db_first_time_t;
+ ops->plan.data[0].before = ops->qm->tiers[selected_tier].db_last_time_t;
+
+ if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
+ // the selected tier
+ time_t selected_tier_first_time_t = ops->plan.data[0].after;
+ time_t selected_tier_last_time_t = ops->plan.data[0].before;
+
+ //buffer_sprintf(wb, ": SELECTED tier %zu, from %ld to %ld", selected_tier, ops->plan.data[0].after, ops->plan.data[0].before);
+
+ // check if our selected tier can start the query
+ if (selected_tier_first_time_t > after_wanted) {
+ // we need some help from other tiers
+ for (size_t tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
+ if(!query_metric_is_valid_tier(ops->qm, tr))
+ continue;
+
+ // find the first time of this tier
+ time_t first_time_t = ops->qm->tiers[tr].db_first_time_t;
+
+ //buffer_sprintf(wb, ": EVAL AFTER tier %d, %ld", tier, first_time_t);
+
+ // can it help?
+ if (first_time_t < selected_tier_first_time_t) {
+ // it can help us add detail at the beginning of the query
+ QUERY_PLAN_ENTRY t = {
+ .tier = tr,
+ .after = (first_time_t < after_wanted) ? after_wanted : first_time_t,
+ .before = selected_tier_first_time_t};
+ ops->plan.data[ops->plan.entries++] = t;
+
+ // prepare for the tier
+ selected_tier_first_time_t = t.after;
+
+ if (t.after <= after_wanted)
+ break;
+ }
+ }
+ }
+
+ // check if our selected tier can finish the query
+ if (selected_tier_last_time_t < before_wanted) {
+ // we need some help from other tiers
+ for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
+ if(!query_metric_is_valid_tier(ops->qm, tr))
+ continue;
+
+ // find the last time of this tier
+ time_t last_time_t = ops->qm->tiers[tr].db_last_time_t;
+
+ //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_t);
+
+ // can it help?
+ if (last_time_t > selected_tier_last_time_t) {
+ // it can help us add detail at the end of the query
+ QUERY_PLAN_ENTRY t = {
+ .tier = tr,
+ .after = selected_tier_last_time_t,
+ .before = (last_time_t > before_wanted) ? before_wanted : last_time_t};
+ ops->plan.data[ops->plan.entries++] = t;
+
+ // prepare for the tier
+ selected_tier_last_time_t = t.before;
+
+ if (t.before >= before_wanted)
+ break;
+ }
+ }
+ }
+ }
+
+ // sort the query plan
+ if(ops->plan.entries > 1)
+ qsort(&ops->plan.data, ops->plan.entries, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);
+
+ // make sure it has the whole timeframe we need
+ if(ops->plan.data[0].after < after_wanted)
+ ops->plan.data[0].after = after_wanted;
+
+ if(ops->plan.data[ops->plan.entries - 1].before > before_wanted)
+ ops->plan.data[ops->plan.entries - 1].before = before_wanted;
+
+ //buffer_sprintf(wb, ": FINAL STEPS %zu", ops->plan.entries);
+
+ //for(size_t i = 0; i < ops->plan.entries ;i++)
+ // buffer_sprintf(wb, ": STEP %zu = use tier %zu from %ld to %ld", i+1, ops->plan.data[i].tier, ops->plan.data[i].after, ops->plan.data[i].before);
+
+ //internal_error(true, "%s", buffer_tostring(wb));
+
+ if(!query_metric_is_valid_tier(ops->qm, ops->plan.data[0].tier))
+ return false;
+
+ query_planer_activate_plan(ops, 0, 0);
+
+ return true;
+}
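+
+// Example outcome (hypothetical numbers): for a query over [100, 1000] with a
+// selected tier 1 holding [300, 900], the sorted plan could be:
+//   plan[0] = { .tier = 2, .after = 100, .before = 300 }  // head from a coarser tier
+//   plan[1] = { .tier = 1, .after = 300, .before = 900 }  // the selected tier
+//   plan[2] = { .tier = 0, .after = 900, .before = 1000 } // tail from the finest tier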
+
+
+// ----------------------------------------------------------------------------
+// dimension level query engine
+
+#define query_interpolate_point(this_point, last_point, now) do { \
+ if(likely( \
+ /* the point to interpolate is more than 1s wide */ \
+ (this_point).end_time - (this_point).start_time > 1 \
+ \
+ /* the two points are exactly next to each other */ \
+ && (last_point).end_time == (this_point).start_time \
+ \
+ /* both points are valid numbers */ \
+ && netdata_double_isnumber((this_point).value) \
+ && netdata_double_isnumber((last_point).value) \
+ \
+ )) { \
+ (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
+ (this_point).end_time = now; \
+ } \
+} while(0)
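+
+// Worked example (hypothetical values): last_point ends at t=100 with value 10;
+// this_point spans [100, 110] with value 20 and now = 105. The scaling factor
+// becomes 1.0 - (110 - 105) / (110 - 100) = 0.5, so the interpolated value is
+// 10 + (20 - 10) * 0.5 = 15, and this_point.end_time is moved to 105.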
+
+#define query_add_point_to_group(r, point, ops) do { \
+ if(likely(netdata_double_isnumber((point).value))) { \
+ if(likely(fpclassify((point).value) != FP_ZERO)) \
+ (ops).group_points_non_zero++; \
+ \
+ if(unlikely((point).flags & SN_FLAG_RESET)) \
+ (ops).group_value_flags |= RRDR_VALUE_RESET; \
+ \
+ (ops).grouping_add(r, (point).value); \
+ } \
+ \
+ (ops).group_points_added++; \
+ (ops).group_anomaly_rate += (point).anomaly; \
+} while(0)
+
+static inline void rrd2rrdr_do_dimension(RRDR *r, size_t dim_id_in_rrdr) {
+ QUERY_TARGET *qt = r->internal.qt;
+ QUERY_METRIC *qm = &qt->query.array[dim_id_in_rrdr];
+ size_t points_wanted = qt->window.points;
+ time_t after_wanted = qt->window.after;
+ time_t before_wanted = qt->window.before;
+
+// bool debug_this = false;
+// if(strcmp("user", string2str(rd->id)) == 0 && strcmp("system.cpu", string2str(rd->rrdset->id)) == 0)
+// debug_this = true;
+
+ time_t max_date = 0,
+ min_date = 0;
+
+ size_t points_added = 0;
+
+ QUERY_ENGINE_OPS ops = {
+ .r = r,
+ .qm = qm,
+ .grouping_add = r->internal.grouping_add,
+ .grouping_flush = r->internal.grouping_flush,
+ .tier_query_fetch = r->internal.tier_query_fetch,
+ .view_update_every = r->update_every,
+ .query_granularity = (time_t)(r->update_every / r->group),
+ .group_value_flags = RRDR_VALUE_NOTHING
+ };
+
+ long rrdr_line = -1;
+ bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
+
+ if(!query_plan(&ops, after_wanted, before_wanted, points_wanted))
+ return;
+
+ NETDATA_DOUBLE min = r->min, max = r->max;
+
+ QUERY_POINT last2_point = QUERY_POINT_EMPTY;
+ QUERY_POINT last1_point = QUERY_POINT_EMPTY;
+ QUERY_POINT new_point = QUERY_POINT_EMPTY;
+
+ time_t now_start_time = after_wanted - ops.query_granularity;
+ time_t now_end_time = after_wanted + ops.view_update_every - ops.query_granularity;
+
+ size_t db_points_read_since_plan_switch = 0; (void)db_points_read_since_plan_switch;
+
+ // The main loop, based on the query granularity we need
+ for( ; points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {
+
+ if(unlikely(query_plan_should_switch_plan(ops, now_end_time))) {
+ query_planer_next_plan(&ops, now_end_time, new_point.end_time);
+ db_points_read_since_plan_switch = 0;
+ }
+
+ // read all the points of the db, prior to the time we need (now_end_time)
+
+ size_t count_same_end_time = 0;
+ while(count_same_end_time < 100) {
+ if(likely(count_same_end_time == 0)) {
+ last2_point = last1_point;
+ last1_point = new_point;
+ }
+
+ if(unlikely(ops.is_finished(&ops.handle))) {
+ if(count_same_end_time != 0) {
+ last2_point = last1_point;
+ last1_point = new_point;
+ }
+ new_point = QUERY_POINT_EMPTY;
+ new_point.start_time = last1_point.end_time;
+ new_point.end_time = now_end_time;
+//
+// if(debug_this) info("QUERY: is finished() returned true");
+//
+ break;
+ }
+
+ // fetch the new point
+ {
+ db_points_read_since_plan_switch++;
+ STORAGE_POINT sp = ops.next_metric(&ops.handle);
+
+ ops.db_points_read_per_tier[ops.tier]++;
+ ops.db_total_points_read++;
+
+ new_point.start_time = sp.start_time;
+ new_point.end_time = sp.end_time;
+ new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
+ query_point_set_id(new_point, ops.db_total_points_read);
+
+// if(debug_this)
+// info("QUERY: got point %zu, from time %ld to %ld // now from %ld to %ld // query from %ld to %ld",
+// new_point.id, new_point.start_time, new_point.end_time, now_start_time, now_end_time, after_wanted, before_wanted);
+//
+ // set the right value to the point we got
+ if(likely(!storage_point_is_unset(sp) && !storage_point_is_empty(sp))) {
+
+ if(unlikely(use_anomaly_bit_as_value))
+ new_point.value = new_point.anomaly;
+
+ else {
+ switch (ops.tier_query_fetch) {
+ default:
+ case TIER_QUERY_FETCH_AVERAGE:
+ new_point.value = sp.sum / sp.count;
+ break;
+
+ case TIER_QUERY_FETCH_MIN:
+ new_point.value = sp.min;
+ break;
+
+ case TIER_QUERY_FETCH_MAX:
+ new_point.value = sp.max;
+ break;
+
+ case TIER_QUERY_FETCH_SUM:
+ new_point.value = sp.sum;
+ break;
+ };
+ }
+ }
+ else {
+ new_point.value = NAN;
+ new_point.flags = SN_FLAG_NONE;
+ }
+ }
+
+ // check if the db is giving us zero duration points
+ if(unlikely(new_point.start_time == new_point.end_time)) {
+ internal_error(true, "QUERY: '%s', dimension '%s' next_metric() returned point %zu start time %ld, end time %ld, that are both equal",
+ qt->id, string2str(qm->dimension.id), new_point.id, new_point.start_time, new_point.end_time);
+
+ new_point.start_time = new_point.end_time - ops.tier_ptr->db_update_every;
+ }
+
+ // check if the db is advancing the query
+ if(unlikely(new_point.end_time <= last1_point.end_time)) {
+ internal_error(db_points_read_since_plan_switch > 1,
+ "QUERY: '%s', dimension '%s' next_metric() returned point %zu from %ld to %ld, before the last point %zu from %ld to %ld, now is %ld to %ld",
+ qt->id, string2str(qm->dimension.id), new_point.id, new_point.start_time, new_point.end_time,
+ last1_point.id, last1_point.start_time, last1_point.end_time, now_start_time, now_end_time);
+
+ count_same_end_time++;
+ continue;
+ }
+ count_same_end_time = 0;
+
+ // decide how to use this point
+ if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
+ // this db point ends before our now_end_time
+
+ if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
+ // this db point ends after our now_start time
+
+ query_add_point_to_group(r, new_point, ops);
+ }
+ else {
+ // we don't need this db point
+ // it is totally outside our current time-frame
+
+ // this is desirable for the first point of the query
+ // because it allows us to interpolate the next point
+ // at exactly the time we will want
+
+ // we only log if this is not point 1
+ internal_error(new_point.end_time < after_wanted && new_point.id > 1,
+ "QUERY: '%s', dimension '%s' next_metric() returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
+ qt->id, string2str(qm->dimension.id),
+ new_point.id, new_point.start_time, new_point.end_time,
+ now_start_time, now_end_time,
+ after_wanted, before_wanted);
+ }
+
+ }
+ else {
+ // the point ends in the future
+ // so, we will interpolate it below, at the inner loop
+ break;
+ }
+ }
+
+ if(unlikely(count_same_end_time)) {
+ internal_error(true,
+ "QUERY: '%s', dimension '%s', the database does not advance the query, it returned an end time less or equal to the end time of the last point we got %ld, %zu times",
+ qt->id, string2str(qm->dimension.id), last1_point.end_time, count_same_end_time);
+
+ if(unlikely(new_point.end_time <= last1_point.end_time))
+ new_point.end_time = now_end_time;
+ }
+
+ // the inner loop
+ // we have 3 points in memory: last2, last1, new
+ // we select the one to use based on their timestamps
+
+ size_t iterations = 0;
+ for ( ; now_end_time <= new_point.end_time && points_added < points_wanted ;
+ now_end_time += ops.view_update_every, iterations++) {
+
+ // now_start_time is stale inside this loop, but we don't need it here
+
+ QUERY_POINT current_point;
+
+ if(likely(now_end_time > new_point.start_time)) {
+ // it is time for our NEW point to be used
+ current_point = new_point;
+ query_interpolate_point(current_point, last1_point, now_end_time);
+
+// internal_error(current_point.id > 0
+// && last1_point.id == 0
+// && current_point.end_time > after_wanted
+// && current_point.end_time > now_end_time,
+// "QUERY: '%s', dimension '%s', after %ld, before %ld, view update every %ld,"
+// " query granularity %ld, interpolating point %zu (from %ld to %ld) at %ld,"
+// " but we could really favor by having last_point1 in this query.",
+// qt->id, string2str(qm->dimension.id),
+// after_wanted, before_wanted,
+// ops.view_update_every, ops.query_granularity,
+// current_point.id, current_point.start_time, current_point.end_time,
+// now_end_time);
+ }
+ else if(likely(now_end_time <= last1_point.end_time)) {
+ // our LAST point is still valid
+ current_point = last1_point;
+ query_interpolate_point(current_point, last2_point, now_end_time);
+
+// internal_error(current_point.id > 0
+// && last2_point.id == 0
+// && current_point.end_time > after_wanted
+// && current_point.end_time > now_end_time,
+// "QUERY: '%s', dimension '%s', after %ld, before %ld, view update every %ld,"
+// " query granularity %ld, interpolating point %zu (from %ld to %ld) at %ld,"
+// " but we could really favor by having last_point2 in this query.",
+// qt->id, string2str(qm->dimension.id),
+// after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
+// current_point.id, current_point.start_time, current_point.end_time,
+// now_end_time);
+ }
+ else {
+ // a GAP, we don't have a value this time
+ current_point = QUERY_POINT_EMPTY;
+ }
+
+ query_add_point_to_group(r, current_point, ops);
+
+ rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
+ size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
+
+ if(unlikely(!min_date)) min_date = now_end_time;
+ max_date = now_end_time;
+
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
+
+ // update the dimension options
+ if(likely(ops.group_points_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+
+ // store the specific point options
+ *rrdr_value_options_ptr = ops.group_value_flags;
+
+ // store the group value
+ NETDATA_DOUBLE group_value = ops.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_o_v_index] = group_value;
+
+ // anomaly rates are transported as uint8_t, so we double their
+ // precision by using the range 0 - 200 instead of 0 - 100
+ r->ar[rrdr_o_v_index] = ops.group_anomaly_rate / (NETDATA_DOUBLE)ops.group_points_added;
+
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
+
+ if(unlikely(group_value < min)) min = group_value;
+ if(unlikely(group_value > max)) max = group_value;
+
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = group_value;
+ }
+
+ points_added++;
+ ops.group_points_added = 0;
+ ops.group_value_flags = RRDR_VALUE_NOTHING;
+ ops.group_points_non_zero = 0;
+ ops.group_anomaly_rate = 0;
+ }
+ // the loop above increased "now" by query_granularity,
+ // but the main loop will increase it too,
+ // so, let's undo the last iteration of this loop
+ if(iterations)
+ now_end_time -= ops.view_update_every;
+ }
+ ops.finalize(&ops.handle);
+
+ r->internal.result_points_generated += points_added;
+ r->internal.db_points_read += ops.db_total_points_read;
+ for(size_t tr = 0; tr < storage_tiers ; tr++)
+ r->internal.tier_points_read[tr] += ops.db_points_read_per_tier[tr];
+
+ r->min = min;
+ r->max = max;
+ r->before = max_date;
+ r->after = min_date - ops.view_update_every + ops.query_granularity;
+ rrdr_done(r, rrdr_line);
+
+ internal_error(points_added != points_wanted,
+ "QUERY: '%s', dimension '%s', requested %zu points, but RRDR added %zu (%zu db points read).",
+ qt->id, string2str(qm->dimension.id),
+ (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
+}
+
+// ----------------------------------------------------------------------------
+// fill the gap of a tier
+
+void store_metric_at_tier(RRDDIM *rd, size_t tier, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
+void store_metric_collection_completed(void);
+
+void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now) {
+ if(unlikely(tier >= storage_tiers)) return;
+ if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
+
+ struct rrddim_tier *t = rd->tiers[tier];
+ if(unlikely(!t)) return;
+
+ time_t latest_time_t = t->query_ops->latest_time(t->db_metric_handle);
+ time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
+ time_t time_diff = now - latest_time_t;
+
+ // if the user wants only NEW backfilling, and we don't have any data
+ if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_t <= 0) return;
+
+ // there is really nothing we can do
+ if(now <= latest_time_t || time_diff < granularity) return;
+
+ struct storage_engine_query_handle handle;
+
+ // for each lower tier
+ for(int read_tier = (int)tier - 1; read_tier >= 0 ; read_tier--){
+ time_t smaller_tier_first_time = rd->tiers[read_tier]->query_ops->oldest_time(rd->tiers[read_tier]->db_metric_handle);
+ time_t smaller_tier_last_time = rd->tiers[read_tier]->query_ops->latest_time(rd->tiers[read_tier]->db_metric_handle);
+ if(smaller_tier_last_time <= latest_time_t) continue; // it is as bad as we are
+
+ long after_wanted = (latest_time_t < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_t;
+ long before_wanted = smaller_tier_last_time;
+
+ struct rrddim_tier *tmp = rd->tiers[read_tier];
+ tmp->query_ops->init(tmp->db_metric_handle, &handle, after_wanted, before_wanted);
+
+ size_t points_read = 0;
+
+ while(!tmp->query_ops->is_finished(&handle)) {
+
+ STORAGE_POINT sp = tmp->query_ops->next_metric(&handle);
+ points_read++;
+
+ if(sp.end_time > latest_time_t) {
+ latest_time_t = sp.end_time;
+ store_metric_at_tier(rd, tier, t, sp, sp.end_time * USEC_PER_SEC);
+ }
+ }
+
+ tmp->query_ops->finalize(&handle);
+ store_metric_collection_completed();
+ global_statistics_backfill_query_completed(points_read);
+
+ //internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
+ // rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points_read, read_tier);
+ }
+}
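+
+// Example (hypothetical): for tier 1 with tier_grouping=60 over a dimension
+// collected every second, granularity is 60s, so backfilling only runs once the
+// tier's latest point is at least 60s behind 'now'; the missing range is then
+// replayed from the lower (finer) tiers that have it.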
+
+// ----------------------------------------------------------------------------
+// fill RRDR for the whole chart
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static void rrd2rrdr_log_request_response_metadata(RRDR *r
+ , RRDR_OPTIONS options __maybe_unused
+ , RRDR_GROUPING group_method
+ , bool aligned
+ , size_t group
+ , time_t resampling_time
+ , size_t resampling_group
+ , time_t after_wanted
+ , time_t after_requested
+ , time_t before_wanted
+ , time_t before_requested
+ , size_t points_requested
+ , size_t points_wanted
+ //, size_t after_slot
+ //, size_t before_slot
+ , const char *msg
+ ) {
+
+ time_t first_entry_t = r->internal.qt->db.first_time_t;
+ time_t last_entry_t = r->internal.qt->db.last_time_t;
+
+ internal_error(
+ true,
+ "rrd2rrdr() on %s update every %ld with %s grouping %s (group: %zu, resampling_time: %ld, resampling_group: %zu), "
+ "after (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "before (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "duration (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "points (got: %zu, want: %zu, req: %zu), "
+ "%s"
+ , r->internal.qt->id
+ , r->internal.qt->window.query_granularity
+
+ // grouping
+ , (aligned) ? "aligned" : "unaligned"
+ , group_method2string(group_method)
+ , group
+ , resampling_time
+ , resampling_group
+
+ // after
+ , r->after
+ , after_wanted
+ , after_requested
+ , first_entry_t
+
+ // before
+ , r->before
+ , before_wanted
+ , before_requested
+ , last_entry_t
+
+ // duration
+ , (long)(r->before - r->after + r->internal.qt->window.query_granularity)
+ , (long)(before_wanted - after_wanted + r->internal.qt->window.query_granularity)
+ , (long)before_requested - after_requested
+ , (long)((last_entry_t - first_entry_t) + r->internal.qt->window.query_granularity)
+
+ // points
+ , r->rows
+ , points_wanted
+ , points_requested
+
+ // message
+ , msg
+ );
+}
+#endif // NETDATA_INTERNAL_CHECKS
+
+// Returns true if a relative period was requested (and converted to absolute), false if the period was already absolute
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before) {
+ time_t now = now_realtime_sec() - 1;
+
+ int absolute_period_requested = -1;
+ long long after_requested, before_requested;
+
+ before_requested = *before;
+ after_requested = *after;
+
+ // allow relative for before (smaller than API_RELATIVE_TIME_MAX)
+ if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
+ // if the user asked for a positive relative time,
+ // flip it to a negative
+ if(before_requested > 0)
+ before_requested = -before_requested;
+
+ before_requested = now + before_requested;
+ absolute_period_requested = 0;
+ }
+
+ // allow relative for after (smaller than API_RELATIVE_TIME_MAX)
+ if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
+ if(after_requested > 0)
+ after_requested = -after_requested;
+
+ // if the user didn't give an after, default to the
+ // last 600 seconds
+ if(after_requested == 0)
+ after_requested = -600;
+
+ // since the query engine now returns inclusive timestamps,
+ // it would be awkward to return 6 points when after=-5 is given,
+ // so for relative queries we add 1 second, to give
+ // more predictable results to users.
+ after_requested = before_requested + after_requested + 1;
+ absolute_period_requested = 0;
+ }
+
+ if(absolute_period_requested == -1)
+ absolute_period_requested = 1;
+
+ // check if the parameters are flipped
+ if(after_requested > before_requested) {
+ long long t = before_requested;
+ before_requested = after_requested;
+ after_requested = t;
+ }
+
+ // if the query requests future data
+ // shift the query back to be in the present time
+ // (this may also happen because of the rules above)
+ if(before_requested > now) {
+ long long delta = before_requested - now;
+ before_requested -= delta;
+ after_requested -= delta;
+ }
+
+ time_t absolute_minimum_time = now - (10 * 365 * 86400);
+ time_t absolute_maximum_time = now + (1 * 365 * 86400);
+
+ if (after_requested < absolute_minimum_time && !unittest_running)
+ after_requested = absolute_minimum_time;
+
+ if (after_requested > absolute_maximum_time && !unittest_running)
+ after_requested = absolute_maximum_time;
+
+ if (before_requested < absolute_minimum_time && !unittest_running)
+ before_requested = absolute_minimum_time;
+
+ if (before_requested > absolute_maximum_time && !unittest_running)
+ before_requested = absolute_maximum_time;
+
+ *before = before_requested;
+ *after = after_requested;
+
+ return (absolute_period_requested != 1);
+}
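+
+// Example: with now = 1700000000, the common relative request after=-600,
+// before=0 resolves to before=1700000000 and
+// after = 1700000000 - 600 + 1 = 1699999401, and the function returns true
+// (it was a relative window). An absolute request such as after=1699990000,
+// before=1700000000 passes through unchanged and returns false.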
+
+// #define DEBUG_QUERY_LOGIC 1
+
+#ifdef DEBUG_QUERY_LOGIC
+#define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
+#define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
+#define query_debug_log_fin() { \
+ info("QUERY: '%s', after:%ld, before:%ld, duration:%ld, points:%zu, res:%ld - wanted => after:%ld, before:%ld, points:%zu, group:%zu, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", qt->id, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
+ buffer_free(debug_log); \
+ debug_log = NULL; \
+ }
+#define query_debug_log_free() do { buffer_free(debug_log); } while(0)
+#else
+#define query_debug_log_init() debug_dummy()
+#define query_debug_log(args...) debug_dummy()
+#define query_debug_log_fin() debug_dummy()
+#define query_debug_log_free() debug_dummy()
+#endif
+
+bool query_target_calculate_window(QUERY_TARGET *qt) {
+ if (unlikely(!qt)) return false;
+
+ size_t points_requested = (long)qt->request.points;
+ time_t after_requested = qt->request.after;
+ time_t before_requested = qt->request.before;
+ RRDR_GROUPING group_method = qt->request.group_method;
+ time_t resampling_time_requested = qt->request.resampling_time;
+ RRDR_OPTIONS options = qt->request.options;
+ size_t tier = qt->request.tier;
+ time_t update_every = qt->db.minimum_latest_update_every;
+
+ // RULES
+ // points_requested = 0
+ // the user wants all the natural points the database has
+ //
+ // after_requested = 0
+ // the user wants to start the query from the oldest point in our database
+ //
+ // before_requested = 0
+ // the user wants the query to end at the latest point in our database
+ //
+ // when natural points are wanted, the query has to be aligned to the update_every
+ // of the database
+
+ size_t points_wanted = points_requested;
+ time_t after_wanted = after_requested;
+ time_t before_wanted = before_requested;
+
+ bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+ bool automatic_natural_points = (points_wanted == 0);
+ bool relative_period_requested = false;
+ bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
+ bool before_is_aligned_to_db_end = false;
+
+ query_debug_log_init();
+
+ if (ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
+ relative_period_requested = true;
+ natural_points = true;
+ options |= RRDR_OPTION_NATURAL_POINTS;
+ query_debug_log(":relative+natural");
+ }
+
+ // if the user wants virtual points, make sure we do it
+ if (options & RRDR_OPTION_VIRTUAL_POINTS)
+ natural_points = false;
+
+ // set the right flag about natural and virtual points
+ if (natural_points) {
+ options |= RRDR_OPTION_NATURAL_POINTS;
+
+ if (options & RRDR_OPTION_VIRTUAL_POINTS)
+ options &= ~RRDR_OPTION_VIRTUAL_POINTS;
+ }
+ else {
+ options |= RRDR_OPTION_VIRTUAL_POINTS;
+
+ if (options & RRDR_OPTION_NATURAL_POINTS)
+ options &= ~RRDR_OPTION_NATURAL_POINTS;
+ }
+
+ if (after_wanted == 0 || before_wanted == 0) {
+ relative_period_requested = true;
+
+ time_t first_entry_t = qt->db.first_time_t;
+ time_t last_entry_t = qt->db.last_time_t;
+
+ if (first_entry_t == 0 || last_entry_t == 0) {
+ internal_error(true, "QUERY: no data detected on query '%s' (db first_entry_t = %ld, last_entry_t = %ld", qt->id, first_entry_t, last_entry_t);
+ query_debug_log_free();
+ return false;
+ }
+
+ query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);
+
+ if (after_wanted == 0) {
+ after_wanted = first_entry_t;
+ query_debug_log(":zero after_wanted %ld", after_wanted);
+ }
+
+ if (before_wanted == 0) {
+ before_wanted = last_entry_t;
+ before_is_aligned_to_db_end = true;
+ query_debug_log(":zero before_wanted %ld", before_wanted);
+ }
+
+ if (points_wanted == 0) {
+ points_wanted = (last_entry_t - first_entry_t) / update_every;
+ query_debug_log(":zero points_wanted %zu", points_wanted);
+ }
+ }
+
+ if (points_wanted == 0) {
+ points_wanted = 600;
+ query_debug_log(":zero600 points_wanted %zu", points_wanted);
+ }
+
+ // convert our before_wanted and after_wanted to absolute
+ rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
+ query_debug_log(":relative2absolute after %ld, before %ld", after_wanted, before_wanted);
+
+ if (natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
+ update_every = rrdset_find_natural_update_every_for_timeframe(
+ qt, after_wanted, before_wanted, points_wanted, options, tier);
+
+ if (update_every <= 0) update_every = qt->db.minimum_latest_update_every;
+ query_debug_log(":natural update every %ld", update_every);
+ }
+
+ // this is the update_every of the query
+ // it may be different to the update_every of the database
+ time_t query_granularity = (natural_points) ? update_every : 1;
+ if (query_granularity <= 0) query_granularity = 1;
+ query_debug_log(":query_granularity %ld", query_granularity);
+
+ // align before_wanted and after_wanted to query_granularity
+ if (before_wanted % query_granularity) {
+ before_wanted -= before_wanted % query_granularity;
+ query_debug_log(":granularity align before_wanted %ld", before_wanted);
+ }
+
+ if (after_wanted % query_granularity) {
+ after_wanted -= after_wanted % query_granularity;
+ query_debug_log(":granularity align after_wanted %ld", after_wanted);
+ }
+
+ // automatic_natural_points is set when the user wants all the points available in the database
+ if (automatic_natural_points) {
+ points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
+ if (unlikely(points_wanted <= 0)) points_wanted = 1;
+ query_debug_log(":auto natural points_wanted %zu", points_wanted);
+ }
+
+ time_t duration = before_wanted - after_wanted;
+
+ // if the resampling time is too big, extend the duration to the past
+ if (unlikely(resampling_time_requested > duration)) {
+ after_wanted = before_wanted - resampling_time_requested;
+ duration = before_wanted - after_wanted;
+ query_debug_log(":resampling after_wanted %ld", after_wanted);
+ }
+
+ // if the duration is not aligned to the resampling time,
+ // extend the duration to the past to avoid a gap in the chart,
+ // but only when the missing duration is above 1/10th of a point
+ if (resampling_time_requested > query_granularity && duration % resampling_time_requested) {
+ time_t delta = duration % resampling_time_requested;
+ if (delta > resampling_time_requested / 10) {
+ after_wanted -= resampling_time_requested - delta;
+ duration = before_wanted - after_wanted;
+ query_debug_log(":resampling2 after_wanted %ld", after_wanted);
+ }
+ }
+
+ // the available points of the query
+ size_t points_available = (duration + 1) / query_granularity;
+ if (unlikely(points_available <= 0)) points_available = 1;
+ query_debug_log(":points_available %zu", points_available);
+
+ if (points_wanted > points_available) {
+ points_wanted = points_available;
+ query_debug_log(":max points_wanted %zu", points_wanted);
+ }
+
+ if(points_wanted > 86400 && !unittest_running) {
+ points_wanted = 86400;
+ query_debug_log(":absolute max points_wanted %zu", points_wanted);
+ }
+
+ // calculate the desired grouping of source data points
+ size_t group = points_available / points_wanted;
+ if (group == 0) group = 1;
+
+ // round "group" to the closest integer
+ if (points_available % points_wanted > points_wanted / 2)
+ group++;
+
+ query_debug_log(":group %zu", group);
+
+ if (points_wanted * group * query_granularity < (size_t)duration) {
+ // the grouping we are going to do is not enough
+ // to cover the entire requested duration, so
+ // we have to change the number of points, to make sure we
+ // respect the timeframe as closely as possible
+
+ // let's see how many points are the optimal
+ points_wanted = points_available / group;
+
+ if (points_wanted * group < points_available)
+ points_wanted++;
+
+ if (unlikely(points_wanted == 0))
+ points_wanted = 1;
+
+ query_debug_log(":optimal points %zu", points_wanted);
+ }
+
+ // resampling_time_requested enforces a certain grouping multiple
+ NETDATA_DOUBLE resampling_divisor = 1.0;
+ size_t resampling_group = 1;
+ if (unlikely(resampling_time_requested > query_granularity)) {
+ // the number of points to group to satisfy the requested resampling time
+ resampling_group = resampling_time_requested / query_granularity;
+ if (unlikely(resampling_time_requested % query_granularity))
+ resampling_group++;
+
+ query_debug_log(":resampling group %zu", resampling_group);
+
+ // adapt group according to resampling_group
+ if (unlikely(group < resampling_group)) {
+ group = resampling_group; // do not allow grouping below the desired one
+ query_debug_log(":group less res %zu", group);
+ }
+ if (unlikely(group % resampling_group)) {
+ group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+ query_debug_log(":group mod res %zu", group);
+ }
+
+ // resampling_divisor = group / resampling_group;
+ resampling_divisor = (NETDATA_DOUBLE) (group * query_granularity) / (NETDATA_DOUBLE) resampling_time_requested;
+ query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
+ }
+
+ // now that we have group, align the requested timeframe to fit it.
+ if (aligned && before_wanted % (group * query_granularity)) {
+ if (before_is_aligned_to_db_end)
+ before_wanted -= before_wanted % (time_t)(group * query_granularity);
+ else
+ before_wanted += (time_t)(group * query_granularity) - before_wanted % (time_t)(group * query_granularity);
+ query_debug_log(":align before_wanted %ld", before_wanted);
+ }
+
+ after_wanted = before_wanted - (time_t)(points_wanted * group * query_granularity) + query_granularity;
+ query_debug_log(":final after_wanted %ld", after_wanted);
+
+ duration = before_wanted - after_wanted;
+ query_debug_log(":final duration %ld", duration + 1);
+
+ query_debug_log_fin();
+
+ internal_error(points_wanted != duration / (query_granularity * group) + 1,
+ "QUERY: points_wanted %zu is not points %zu",
+ points_wanted, (size_t)(duration / (query_granularity * group) + 1));
+
+ internal_error(group < resampling_group,
+ "QUERY: group %zu is less than the desired group points %zu",
+ group, resampling_group);
+
+ internal_error(group > resampling_group && group % resampling_group,
+ "QUERY: group %zu is not a multiple of the desired group points %zu",
+ group, resampling_group);
+
+ // -------------------------------------------------------------------------
+ // update QUERY_TARGET with our calculations
+
+ qt->window.after = after_wanted;
+ qt->window.before = before_wanted;
+ qt->window.relative = relative_period_requested;
+ qt->window.points = points_wanted;
+ qt->window.group = group;
+ qt->window.group_method = group_method;
+ qt->window.group_options = qt->request.group_options;
+ qt->window.query_granularity = query_granularity;
+ qt->window.resampling_group = resampling_group;
+ qt->window.resampling_divisor = resampling_divisor;
+ qt->window.options = options;
+ qt->window.tier = tier;
+ qt->window.aligned = aligned;
+
+ return true;
+}
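+
+// Worked example (hypothetical request): natural points with update_every=1
+// over a 3600s window and points=600: points_available = 3601,
+// group = 3601 / 600 = 6 (the remainder of 1 does not round it up), and the
+// final window is re-derived as before - (600 * 6 * 1) + 1, producing exactly
+// 600 output points of 6 database points each.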
+
+RRDR *rrd2rrdr_legacy(
+ ONEWAYALLOC *owa,
+ RRDSET *st, size_t points, time_t after, time_t before,
+ RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
+ const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source) {
+
+ QUERY_TARGET_REQUEST qtr = {
+ .st = st,
+ .points = points,
+ .after = after,
+ .before = before,
+ .group_method = group_method,
+ .resampling_time = resampling_time,
+ .options = options,
+ .dimensions = dimensions,
+ .group_options = group_options,
+ .timeout = timeout,
+ .tier = tier,
+ .query_source = query_source,
+ };
+
+ return rrd2rrdr(owa, query_target_create(&qtr));
+}
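+
+// Minimal usage sketch (illustrative; assumes 'st' is a valid RRDSET, that the
+// ONEWAYALLOC helpers from libnetdata are used as below, and that
+// QUERY_SOURCE_API_DATA is the appropriate source): fetch the last 10 minutes
+// of a chart as 60 averaged points:
+//
+//    ONEWAYALLOC *owa = onewayalloc_create(0);
+//    RRDR *r = rrd2rrdr_legacy(owa, st, 60, -600, 0, RRDR_GROUPING_AVERAGE,
+//                              0, RRDR_OPTION_NULL2ZERO, NULL, NULL,
+//                              0, 0, QUERY_SOURCE_API_DATA);
+//    if(r) rrdr_free(owa, r);
+//    onewayalloc_destroy(owa);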
+
+RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
+ if(!qt)
+ return NULL;
+
+ if(!owa) {
+ query_target_release(qt);
+ return NULL;
+ }
+
+ // qt.window members are the WANTED ones.
+ // qt.request members are the REQUESTED ones.
+
+ RRDR *r = rrdr_create(owa, qt);
+ if(unlikely(!r)) {
+ internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, points=%zu",
+ qt->id, qt->window.after, qt->window.before, qt->window.points);
+ return NULL;
+ }
+
+ if(unlikely(!r->d || !qt->window.points)) {
+ internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%ld, before=%ld, points=%zu",
+ qt->id, qt->window.after, qt->window.before, qt->window.points);
+ return r;
+ }
+
+ if(qt->window.relative)
+ r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+ else
+ r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
+
+ // -------------------------------------------------------------------------
+ // initialize RRDR
+
+ r->group = qt->window.group;
+ r->update_every = (int) (qt->window.group * qt->window.query_granularity);
+ r->before = qt->window.before;
+ r->after = qt->window.after;
+ r->internal.points_wanted = qt->window.points;
+ r->internal.resampling_group = qt->window.resampling_group;
+ r->internal.resampling_divisor = qt->window.resampling_divisor;
+ r->internal.query_options = qt->window.options;
+
+ // -------------------------------------------------------------------------
+ // assign the processor functions
+ rrdr_set_grouping_function(r, qt->window.group_method);
+
+ // allocate any memory required by the grouping method
+ r->internal.grouping_create(r, qt->window.group_options);
+
+ // -------------------------------------------------------------------------
+ // do the work for each dimension
+
+ time_t max_after = 0, min_before = 0;
+ size_t max_rows = 0;
+
+ long dimensions_used = 0, dimensions_nonzero = 0;
+ struct timeval query_start_time;
+ struct timeval query_current_time;
+ if (qt->request.timeout)
+ now_realtime_timeval(&query_start_time);
+
+ for(size_t c = 0, max = qt->query.used; c < max ; c++) {
+ // set the query target dimension options to rrdr
+ r->od[c] = qt->query.array[c].dimension.options;
+
+ r->od[c] |= RRDR_DIMENSION_SELECTED;
+
+ // reset the grouping for the new dimension
+ r->internal.grouping_reset(r);
+
+ rrd2rrdr_do_dimension(r, c);
+ if (qt->request.timeout)
+ now_realtime_timeval(&query_current_time);
+
+ if(r->od[c] & RRDR_DIMENSION_NONZERO)
+ dimensions_nonzero++;
+
+ // verify all dimensions are aligned
+ if(unlikely(!dimensions_used)) {
+ min_before = r->before;
+ max_after = r->after;
+ max_rows = r->rows;
+ }
+ else {
+ if(r->after != max_after) {
+ internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ string2str(qt->query.array[c].dimension.id), (size_t)max_after, string2str(qt->query.array[c].dimension.name), (size_t)r->after);
+
+ r->after = (r->after > max_after) ? r->after : max_after;
+ }
+
+ if(r->before != min_before) {
+ internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ string2str(qt->query.array[c].dimension.id), (size_t)min_before, string2str(qt->query.array[c].dimension.name), (size_t)r->before);
+
+ r->before = (r->before < min_before) ? r->before : min_before;
+ }
+
+ if(r->rows != max_rows) {
+ internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ string2str(qt->query.array[c].dimension.id), (size_t)max_rows, string2str(qt->query.array[c].dimension.name), (size_t)r->rows);
+
+ r->rows = (r->rows > max_rows) ? r->rows : max_rows;
+ }
+ }
+
+ dimensions_used++;
+ if (qt->request.timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > (NETDATA_DOUBLE)qt->request.timeout) {
+ log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %lld ms)",
+ (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, (long long)qt->request.timeout);
+ r->result_options |= RRDR_RESULT_OPTION_CANCEL;
+ break;
+ }
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if (dimensions_used) {
+ if(r->internal.log)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
+ r->internal.log);
+
+ if(r->rows != qt->window.points)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
+ "got 'points' is not wanted 'points'");
+
+ if(qt->window.aligned && (r->before % (qt->window.group * qt->window.query_granularity)) != 0)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before,qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
+ "'before' is not aligned but alignment is required");
+
+ // 'after' should not be aligned, since we start inside the first group
+ //if(qt->window.aligned && (r->after % group) != 0)
+ // rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group, qt->window.after, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+
+ if(r->before != qt->window.before)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
+ "chart is not aligned to requested 'before'");
+
+ if(r->before != qt->window.before)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
+ "got 'before' is not wanted 'before'");
+
+ // reported 'after' varies, depending on group
+ if(r->after != qt->window.after)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
+ "got 'after' is not wanted 'after'");
+
+ }
+#endif
+
+ // free all resources used by the grouping method
+ r->internal.grouping_free(r);
+
+ // when all the dimensions are zero, we should return all of them
+ if(unlikely((qt->window.options & RRDR_OPTION_NONZERO) && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
+ // all the dimensions are zero
+ // mark them as NONZERO to send them all
+ for(size_t c = 0, max = qt->query.used; c < max ; c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ r->od[c] |= RRDR_DIMENSION_NONZERO;
+ }
+ }
+
+ global_statistics_rrdr_query_completed(dimensions_used, r->internal.db_points_read,
+ r->internal.result_points_generated, qt->request.query_source);
+ return r;
+}
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
new file mode 100644
index 0000000..ebad5a1
--- /dev/null
+++ b/web/api/queries/query.h
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_DATA_QUERY_H
+#define NETDATA_API_DATA_QUERY_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum rrdr_grouping {
+ RRDR_GROUPING_UNDEFINED = 0,
+ RRDR_GROUPING_AVERAGE,
+ RRDR_GROUPING_MIN,
+ RRDR_GROUPING_MAX,
+ RRDR_GROUPING_SUM,
+ RRDR_GROUPING_INCREMENTAL_SUM,
+ RRDR_GROUPING_TRIMMED_MEAN1,
+ RRDR_GROUPING_TRIMMED_MEAN2,
+ RRDR_GROUPING_TRIMMED_MEAN3,
+ RRDR_GROUPING_TRIMMED_MEAN5,
+ RRDR_GROUPING_TRIMMED_MEAN10,
+ RRDR_GROUPING_TRIMMED_MEAN15,
+ RRDR_GROUPING_TRIMMED_MEAN20,
+ RRDR_GROUPING_TRIMMED_MEAN25,
+ RRDR_GROUPING_MEDIAN,
+ RRDR_GROUPING_TRIMMED_MEDIAN1,
+ RRDR_GROUPING_TRIMMED_MEDIAN2,
+ RRDR_GROUPING_TRIMMED_MEDIAN3,
+ RRDR_GROUPING_TRIMMED_MEDIAN5,
+ RRDR_GROUPING_TRIMMED_MEDIAN10,
+ RRDR_GROUPING_TRIMMED_MEDIAN15,
+ RRDR_GROUPING_TRIMMED_MEDIAN20,
+ RRDR_GROUPING_TRIMMED_MEDIAN25,
+ RRDR_GROUPING_PERCENTILE25,
+ RRDR_GROUPING_PERCENTILE50,
+ RRDR_GROUPING_PERCENTILE75,
+ RRDR_GROUPING_PERCENTILE80,
+ RRDR_GROUPING_PERCENTILE90,
+ RRDR_GROUPING_PERCENTILE95,
+ RRDR_GROUPING_PERCENTILE97,
+ RRDR_GROUPING_PERCENTILE98,
+ RRDR_GROUPING_PERCENTILE99,
+ RRDR_GROUPING_STDDEV,
+ RRDR_GROUPING_CV,
+ RRDR_GROUPING_SES,
+ RRDR_GROUPING_DES,
+ RRDR_GROUPING_COUNTIF,
+} RRDR_GROUPING;
+
+const char *group_method2string(RRDR_GROUPING group);
+void web_client_api_v1_init_grouping(void);
+RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
+const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group);
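+// e.g. web_client_api_request_v1_data_group("median", RRDR_GROUPING_AVERAGE)
+// maps a user-supplied &group= name to its RRDR_GROUPING value, returning the
+// given default when the name is not recognized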
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_API_DATA_QUERY_H
diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c
new file mode 100644
index 0000000..676224c
--- /dev/null
+++ b/web/api/queries/rrdr.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "rrdr.h"
+
+/*
+static void rrdr_dump(RRDR *r)
+{
+ long c, i;
+ RRDDIM *d;
+
+ fprintf(stderr, "\nCHART %s (%s)\n", r->st->id, r->st->name);
+
+ for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
+ fprintf(stderr, "DIMENSION %s (%s), %s%s%s%s\n"
+ , d->id
+ , d->name
+ , (r->od[c] & RRDR_EMPTY)?"EMPTY ":""
+ , (r->od[c] & RRDR_RESET)?"RESET ":""
+ , (r->od[c] & RRDR_DIMENSION_HIDDEN)?"HIDDEN ":""
+ , (r->od[c] & RRDR_DIMENSION_NONZERO)?"NONZERO ":""
+ );
+ }
+
+ if(r->rows <= 0) {
+ fprintf(stderr, "RRDR does not have any values in it.\n");
+ return;
+ }
+
+ fprintf(stderr, "RRDR includes %d values in it:\n", r->rows);
+
+ // for each line in the array
+ for(i = 0; i < r->rows ;i++) {
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ RRDR_DIMENSION_FLAGS *co = &r->o[ i * r->d ];
+
+ // print the id and the timestamp of the line
+ fprintf(stderr, "%ld %ld ", i + 1, r->t[i]);
+
+ // for each dimension
+ for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ if(unlikely(!(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+
+ if(co[c] & RRDR_EMPTY)
+ fprintf(stderr, "null ");
+ else
+ fprintf(stderr, NETDATA_DOUBLE_FORMAT " %s%s%s%s "
+ , cn[c]
+ , (co[c] & RRDR_EMPTY)?"E":" "
+ , (co[c] & RRDR_RESET)?"R":" "
+ , (co[c] & RRDR_DIMENSION_HIDDEN)?"H":" "
+ , (co[c] & RRDR_DIMENSION_NONZERO)?"N":" "
+ );
+ }
+
+ fprintf(stderr, "\n");
+ }
+}
+*/
+
+inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
+ if(unlikely(!r)) return;
+
+ query_target_release(r->internal.qt);
+ onewayalloc_freez(owa, r->t);
+ onewayalloc_freez(owa, r->v);
+ onewayalloc_freez(owa, r->o);
+ onewayalloc_freez(owa, r->od);
+ onewayalloc_freez(owa, r->ar);
+ onewayalloc_freez(owa, r);
+}
+
+RRDR *rrdr_create(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
+ if(unlikely(!qt || !qt->query.used || !qt->window.points))
+ return NULL;
+
+ size_t dimensions = qt->query.used;
+ size_t points = qt->window.points;
+
+ // create the rrdr
+ RRDR *r = onewayalloc_callocz(owa, 1, sizeof(RRDR));
+ r->internal.owa = owa;
+ r->internal.qt = qt;
+
+ r->before = qt->window.before;
+ r->after = qt->window.after;
+ r->internal.points_wanted = qt->window.points;
+ r->d = (int)dimensions;
+ r->n = (int)points;
+
+ r->t = onewayalloc_callocz(owa, points, sizeof(time_t));
+ r->v = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->o = onewayalloc_mallocz(owa, points * dimensions * sizeof(RRDR_VALUE_FLAGS));
+ r->ar = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->od = onewayalloc_mallocz(owa, dimensions * sizeof(RRDR_DIMENSION_FLAGS));
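+    // note: r->v, r->o and r->ar are row-major 2D arrays of n rows x d columns,
+    // so the value of dimension c at row i lives at v[i * d + c]
+    // (the commented-out rrdr_dump() above walks them exactly this way)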
+
+ r->group = 1;
+ r->update_every = 1;
+
+ return r;
+}
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
new file mode 100644
index 0000000..6151cdd
--- /dev/null
+++ b/web/api/queries/rrdr.h
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_QUERIES_RRDR_H
+#define NETDATA_QUERIES_RRDR_H
+
+#include "libnetdata/libnetdata.h"
+#include "web/api/queries/query.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum tier_query_fetch {
+ TIER_QUERY_FETCH_SUM,
+ TIER_QUERY_FETCH_MIN,
+ TIER_QUERY_FETCH_MAX,
+ TIER_QUERY_FETCH_AVERAGE
+} TIER_QUERY_FETCH;
+
+typedef enum rrdr_options {
+ RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
+ RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
+ RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
+ RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
+ RRDR_OPTION_NATURAL_POINTS = 0x00020000, // return the natural points of the database
+ RRDR_OPTION_VIRTUAL_POINTS = 0x00040000, // return virtual points
+ RRDR_OPTION_ANOMALY_BIT = 0x00080000, // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_RETURN_RAW = 0x00100000, // Return raw data for aggregating across multiple nodes
+ RRDR_OPTION_RETURN_JWAR = 0x00200000, // Return anomaly rates in jsonwrap
+ RRDR_OPTION_SELECTED_TIER = 0x00400000, // Use the selected tier for the query
+ RRDR_OPTION_ALL_DIMENSIONS = 0x00800000, // Return the full dimensions list
+
+ // internal ones - not to be exposed to the API
+    RRDR_OPTION_INTERNAL_AR   = 0x10000000, // internal use only, to let the formatters know we want to render the anomaly rate
+ RRDR_OPTION_HEALTH_RSRVD1 = 0x80000000, // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
+} RRDR_OPTIONS;
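+// the options above are a bitmask and can be freely combined,
+// e.g. (RRDR_OPTION_NONZERO | RRDR_OPTION_JSON_WRAP)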
+
+typedef enum rrdr_value_flag {
+ RRDR_VALUE_NOTHING = 0x00, // no flag set (a good default)
+ RRDR_VALUE_EMPTY = 0x01, // the database value is empty
+ RRDR_VALUE_RESET = 0x02, // the database value is marked as reset (overflown)
+} RRDR_VALUE_FLAGS;
+
+typedef enum rrdr_dimension_flag {
+ RRDR_DIMENSION_DEFAULT = 0x00,
+ RRDR_DIMENSION_HIDDEN = 0x04, // the dimension is hidden (not to be presented to callers)
+ RRDR_DIMENSION_NONZERO = 0x08, // the dimension is non zero (contains non-zero values)
+ RRDR_DIMENSION_SELECTED = 0x10, // the dimension is selected for evaluation in this RRDR
+} RRDR_DIMENSION_FLAGS;
+
+// RRDR result options
+typedef enum rrdr_result_flags {
+ RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001, // the query uses absolute time-frames
+ // (can be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_RELATIVE = 0x00000002, // the query uses relative time-frames
+                                                    // (should not be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_VARIABLE_STEP = 0x00000004, // the query uses variable-step time-frames
+ RRDR_RESULT_OPTION_CANCEL = 0x00000008, // the query needs to be cancelled
+} RRDR_RESULT_OPTIONS;
+
+typedef struct rrdresult {
+ RRDR_RESULT_OPTIONS result_options; // RRDR_RESULT_OPTION_*
+
+ size_t d; // the number of dimensions
+ size_t n; // the number of values in the arrays
+ size_t rows; // the number of rows used
+
+ RRDR_DIMENSION_FLAGS *od; // the options for the dimensions
+
+ time_t *t; // array of n timestamps
+ NETDATA_DOUBLE *v; // array n x d values
+ RRDR_VALUE_FLAGS *o; // array n x d options for each value returned
+ NETDATA_DOUBLE *ar; // array n x d of anomaly rates (0 - 100)
+
+ size_t group; // how many collected values were grouped for each row
+ time_t update_every; // what is the suggested update frequency in seconds
+
+ NETDATA_DOUBLE min;
+ NETDATA_DOUBLE max;
+
+ time_t before;
+ time_t after;
+
+ // internal rrd2rrdr() members below this point
+ struct {
+ ONEWAYALLOC *owa; // the allocator used
+ struct query_target *qt; // the QUERY_TARGET
+
+ RRDR_OPTIONS query_options; // RRDR_OPTION_* (as run by the query)
+
+ size_t points_wanted; // used by SES and DES
+ size_t resampling_group; // used by AVERAGE
+ NETDATA_DOUBLE resampling_divisor; // used by AVERAGE
+
+ // grouping function pointers
+ void (*grouping_create)(struct rrdresult *r, const char *options);
+ void (*grouping_reset)(struct rrdresult *r);
+ void (*grouping_free)(struct rrdresult *r);
+ void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+ TIER_QUERY_FETCH tier_query_fetch; // which value to use from STORAGE_POINT
+ void *grouping_data; // the internal data of the grouping function
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ const char *log;
+#endif
+
+ // statistics
+ size_t db_points_read;
+ size_t result_points_generated;
+ size_t tier_points_read[RRD_STORAGE_TIERS];
+ } internal;
+} RRDR;
+
+#define rrdr_rows(r) ((r)->rows)
+
+#include "database/rrd.h"
+void rrdr_free(ONEWAYALLOC *owa, RRDR *r);
+RRDR *rrdr_create(ONEWAYALLOC *owa, struct query_target *qt);
+
+#include "../web_api_v1.h"
+#include "web/api/queries/query.h"
+
+RRDR *rrd2rrdr_legacy(
+ ONEWAYALLOC *owa,
+ RRDSET *st, size_t points, time_t after, time_t before,
+ RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
+ const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source);
+
+RRDR *rrd2rrdr(ONEWAYALLOC *owa, struct query_target *qt);
+bool query_target_calculate_window(struct query_target *qt);
+
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_QUERIES_RRDR_H
diff --git a/web/api/queries/ses/Makefile.am b/web/api/queries/ses/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/ses/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md
new file mode 100644
index 0000000..b835b81
--- /dev/null
+++ b/web/api/queries/ses/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "Single (or Simple) Exponential Smoothing (`ses`)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/ses/README.md
+-->
+
+# Single (or Simple) Exponential Smoothing (`ses`)
+
+> This query is also available as `ema` and `ewma`.
+
+An exponential moving average (`ema`), also known as an exponentially weighted moving average (`ewma`),
+is a first-order infinite impulse response filter that applies weighting factors which decrease
+exponentially: each older datum receives exponentially less weight, never reaching zero.
+
+In simple terms, this is like an average value, but more recent values are given more weight.
+
+Netdata automatically adjusts the weight (`alpha`) based on the number of values processed,
+using the formula:
+
+```
+window = min(number of values, 15)
+alpha = 2 / (window + 1)
+```
+
+You can change the fixed value `15` in `netdata.conf`:
+
+```
+[web]
+ ses max window = 15
+```
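+
+For illustration, here is a minimal, self-contained C sketch of this recipe (a hypothetical
+stand-alone program, not the Netdata implementation) showing how the adaptive `alpha` makes
+recent values dominate the result:
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    double values[] = { 10.0, 12.0, 11.0, 60.0, 13.0 };
+    int n = (int)(sizeof(values) / sizeof(values[0]));
+
+    int window = (n < 15) ? n : 15;             // window = min(number of values, 15)
+    double alpha = 2.0 / ((double)window + 1.0);
+    double level = values[0];                   // seed the level with the first value
+
+    for(int i = 0; i < n; i++)
+        level = alpha * values[i] + (1.0 - alpha) * level;
+
+    printf("ses = %f\n", level);                // weighted toward the most recent values
+    return 0;
+}
+```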
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: ses -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`ses` does not change the units. For example, if the chart's units are `requests/sec`, the exponential
+moving average is expressed in the same units.
+
+It can also be used in APIs and badges as `&group=ses` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average>
+- <https://en.wikipedia.org/wiki/Exponential_smoothing>.
+
+
diff --git a/web/api/queries/ses/ses.c b/web/api/queries/ses/ses.c
new file mode 100644
index 0000000..5e94002
--- /dev/null
+++ b/web/api/queries/ses/ses.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ses.h"
+
+
+// ----------------------------------------------------------------------------
+// single exponential smoothing
+
+struct grouping_ses {
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE level;
+ size_t count;
+};
+
+static size_t max_window_size = 15;
+
+void grouping_init_ses(void) {
+ long long ret = config_get_number(CONFIG_SECTION_WEB, "ses max window", (long long)max_window_size);
+ if(ret <= 1) {
+ config_set_number(CONFIG_SECTION_WEB, "ses max window", (long long)max_window_size);
+ }
+ else {
+ max_window_size = (size_t) ret;
+ }
+}
+
+static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_ses *g) {
+ (void)g;
+
+ NETDATA_DOUBLE points;
+ if(r->group == 1) {
+        // provide a running SES
+ points = (NETDATA_DOUBLE)r->internal.points_wanted;
+ }
+ else {
+ // provide a SES with flush points
+ points = (NETDATA_DOUBLE)r->group;
+ }
+
+ return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
+}
+
+static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+ g->alpha = 2.0 / (window(r, g) + 1.0);
+ g->alpha_other = 1.0 - g->alpha;
+}
+
+void grouping_create_ses(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_ses *g = (struct grouping_ses *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_ses));
+ set_alpha(r, g);
+ g->level = 0.0;
+ r->internal.grouping_data = g;
+}
+
+// resets when the query switches dimensions
+// so, clear everything to restart
+void grouping_reset_ses(RRDR *r) {
+ struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
+ g->level = 0.0;
+ g->count = 0;
+}
+
+void grouping_free_ses(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
+
+ if(unlikely(!g->count))
+ g->level = value;
+
+ g->level = g->alpha * value + g->alpha_other * g->level;
+ g->count++;
+}
+
+NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
+
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ return 0.0;
+ }
+
+ return g->level;
+}
diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h
new file mode 100644
index 0000000..79b09fb
--- /dev/null
+++ b/web/api/queries/ses/ses.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_SES_H
+#define NETDATA_API_QUERIES_SES_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_init_ses(void);
+
+void grouping_create_ses(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_ses(RRDR *r);
+void grouping_free_ses(RRDR *r);
+void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_SES_H
diff --git a/web/api/queries/stddev/Makefile.am b/web/api/queries/stddev/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/stddev/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md
new file mode 100644
index 0000000..2fca47d
--- /dev/null
+++ b/web/api/queries/stddev/README.md
@@ -0,0 +1,93 @@
+<!--
+title: "standard deviation (`stddev`)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/stddev/README.md
+-->
+
+# Standard deviation (`stddev`)
+
+The standard deviation is a measure that is used to quantify the amount of variation or dispersion
+of a set of data values.
+
+A low standard deviation indicates that the data points tend to be close to the mean (also called the
+expected value) of the set, while a high standard deviation indicates that the data points are spread
+out over a wider range of values.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: stddev -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`stddev` does not change the units. For example, if the chart's units are `requests/sec`, the standard
+deviation is expressed in the same units.
+
+It can also be used in APIs and badges as `&group=stddev` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=stddev&after=-60&label=standard+deviation&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+Check <https://en.wikipedia.org/wiki/Standard_deviation>.
+
+---
+
+# Coefficient of variation (`cv`)
+
+> This query is also available as `rsd`.
+
+The coefficient of variation (`cv`), also known as relative standard deviation (`rsd`),
+is a standardized measure of dispersion of a probability distribution or frequency distribution.
+
+It is defined as the ratio of the **standard deviation** to the **mean**.
+
+In simple terms, it expresses the variation as a percentage of the mean. So, if the average value of a metric
+is 1000 and its standard deviation is 100 (meaning that it typically varies between 900 and 1100), then `cv` is 10%.
+
+This is an easy way to check the % variation, without using absolute values.
+
+For example, you may trigger an alarm if your web server requests/sec `cv` is above 20 (`%`)
+over the last minute. So if your web server was serving 1000 reqs/sec over the last minute,
+the alarm will trigger if it had dips below 800/sec or spikes above 1200/sec.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: cv -1m unaligned of my_dimension
+ units: %
+ warn: $this > 20
+```
+
+The units reported by `cv` are always `%`.
+
+It can also be used in APIs and badges as `&group=cv` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=cv&after=-60&label=coefficient+of+variation&value_color=orange&units=pcent)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+Check <https://en.wikipedia.org/wiki/Coefficient_of_variation>.
+
+
diff --git a/web/api/queries/stddev/stddev.c b/web/api/queries/stddev/stddev.c
new file mode 100644
index 0000000..92a67b4
--- /dev/null
+++ b/web/api/queries/stddev/stddev.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "stddev.h"
+
+
+// ----------------------------------------------------------------------------
+// stddev
+
+// this implementation comes from:
+// https://www.johndcook.com/blog/standard_deviation/
+
+struct grouping_stddev {
+ long count;
+ NETDATA_DOUBLE m_oldM, m_newM, m_oldS, m_newS;
+};
+
+void grouping_create_stddev(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_stddev));
+}
+
+// resets when the query switches dimensions
+// so, clear everything to restart
+void grouping_reset_stddev(RRDR *r) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+ g->count = 0;
+}
+
+void grouping_free_stddev(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ g->count++;
+
+ // See Knuth TAOCP vol 2, 3rd edition, page 232
+ if (g->count == 1) {
+ g->m_oldM = g->m_newM = value;
+ g->m_oldS = 0.0;
+ }
+ else {
+ g->m_newM = g->m_oldM + (value - g->m_oldM) / g->count;
+ g->m_newS = g->m_oldS + (value - g->m_oldM) * (value - g->m_newM);
+
+ // set up for next iteration
+ g->m_oldM = g->m_newM;
+ g->m_oldS = g->m_newS;
+ }
+}
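+
+// worked example of the running (Welford) update above: feeding the values
+// 1, 2, 3 evolves the mean 1 -> 1.5 -> 2 and m_newS 0 -> 0.5 -> 2, giving
+// sample variance 2 / (3 - 1) = 1 and standard deviation 1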
+
+static inline NETDATA_DOUBLE mean(struct grouping_stddev *g) {
+ return (g->count > 0) ? g->m_newM : 0.0;
+}
+
+static inline NETDATA_DOUBLE variance(struct grouping_stddev *g) {
+ return ( (g->count > 1) ? g->m_newS/(NETDATA_DOUBLE)(g->count - 1) : 0.0 );
+}
+static inline NETDATA_DOUBLE stddev(struct grouping_stddev *g) {
+ return sqrtndd(variance(g));
+}
+
+NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(likely(g->count > 1)) {
+ value = stddev(g);
+
+ if(!netdata_double_isnumber(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+ else if(g->count == 1) {
+ value = 0.0;
+ }
+ else {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+
+// https://en.wikipedia.org/wiki/Coefficient_of_variation
+NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(likely(g->count > 1)) {
+ NETDATA_DOUBLE m = mean(g);
+ value = 100.0 * stddev(g) / ((m < 0)? -m : m);
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+ else if(g->count == 1) {
+ // one value collected
+ value = 0.0;
+ }
+ else {
+ // no values collected
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+
+
+/*
+ * Mean = average
+ *
+NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = mean(g);
+
+ if(!isnormal(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+ */
+
+/*
+ * It is not advised to use this version of variance directly
+ *
+NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = variance(g);
+
+ if(!isnormal(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+
+ grouping_reset_stddev(r);
+
+ return value;
+}
+*/
\ No newline at end of file
diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h
new file mode 100644
index 0000000..4b8ffcd
--- /dev/null
+++ b/web/api/queries/stddev/stddev.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_STDDEV_H
+#define NETDATA_API_QUERIES_STDDEV_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_stddev(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_stddev(RRDR *r);
+void grouping_free_stddev(RRDR *r);
+void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_STDDEV_H
diff --git a/web/api/queries/sum/Makefile.am b/web/api/queries/sum/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/sum/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md
new file mode 100644
index 0000000..d4465bd
--- /dev/null
+++ b/web/api/queries/sum/README.md
@@ -0,0 +1,41 @@
+<!--
+title: "Sum"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/sum/README.md
+-->
+
+# Sum
+
+This module sums all the values in the time-frame requested.
+
+You can use `sum` to find the volume of something over a period. For example, summing the
+`requests/sec` samples collected every second for a minute gives the total requests served in that minute.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: sum -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`sum` does not change the units. For example, if the chart's units are `requests/sec`, the result
+is expressed in the same units.
+
+It can also be used in APIs and badges as `&group=sum` in the URL.
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=1m+sum&value_color=orange&units=requests)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Summation>.
+
+
diff --git a/web/api/queries/sum/sum.c b/web/api/queries/sum/sum.c
new file mode 100644
index 0000000..eec6e2a
--- /dev/null
+++ b/web/api/queries/sum/sum.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sum.h"
+
+// ----------------------------------------------------------------------------
+// sum
+
+struct grouping_sum {
+ NETDATA_DOUBLE sum;
+ size_t count;
+};
+
+void grouping_create_sum(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_sum));
+}
+
+// resets when the query switches dimensions
+// so, clear everything to restart
+void grouping_reset_sum(RRDR *r) {
+ struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+void grouping_free_sum(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+ g->sum += value;
+ g->count++;
+}
+
+NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->sum;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
+
+
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
new file mode 100644
index 0000000..8987827
--- /dev/null
+++ b/web/api/queries/sum/sum.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_SUM_H
+#define NETDATA_API_QUERY_SUM_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_sum(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_sum(RRDR *r);
+void grouping_free_sum(RRDR *r);
+void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_SUM_H
diff --git a/web/api/queries/trimmed_mean/Makefile.am b/web/api/queries/trimmed_mean/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/web/api/queries/trimmed_mean/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/trimmed_mean/README.md b/web/api/queries/trimmed_mean/README.md
new file mode 100644
index 0000000..71cdb85
--- /dev/null
+++ b/web/api/queries/trimmed_mean/README.md
@@ -0,0 +1,56 @@
+<!--
+title: "Trimmed Mean"
+description: "Use trimmed-mean in API queries and health entities to find the average value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/trimmed_mean/README.md
+-->
+
+# Trimmed Mean
+
+The trimmed mean is the average value of a series, after excluding a requested percentage of its smallest and biggest points.
+
+Netdata applies linear interpolation on the last point if the requested percentage to exclude does not correspond to
+a whole number of points.
+
+The following trimmed-mean aliases are defined:
+
+- `trimmed-mean1`
+- `trimmed-mean2`
+- `trimmed-mean3`
+- `trimmed-mean5`
+- `trimmed-mean10`
+- `trimmed-mean15`
+- `trimmed-mean20`
+- `trimmed-mean25`
+
+The default `trimmed-mean` is an alias for `trimmed-mean5`.
+Any percentage may be requested using the `group_options` query parameter.
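+
+As an illustration, here is a minimal C sketch of the idea (a hypothetical stand-alone
+program that trims a whole number of points from each tail; the Netdata implementation
+additionally interpolates the boundary point, as described above):
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+
+static int cmp_double(const void *a, const void *b) {
+    double x = *(const double *)a, y = *(const double *)b;
+    return (x > y) - (x < y);
+}
+
+static double trimmed_mean(double *values, size_t n, double percent) {
+    qsort(values, n, sizeof(double), cmp_double);
+    size_t trim = (size_t)((double)n * percent / 100.0); // points dropped per tail
+    double sum = 0.0;
+    size_t used = 0;
+    for(size_t i = trim; i < n - trim; i++) {
+        sum += values[i];
+        used++;
+    }
+    return used ? sum / (double)used : 0.0;
+}
+
+int main(void) {
+    double v[] = { 1, 2, 3, 4, 100 };             // 100 is an outlier spike
+    printf("%f\n", trimmed_mean(v, 5, 20.0));     // drops 1 and 100, prints 3.000000
+    return 0;
+}
+```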
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: trimmed-mean5 -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`trimmed-mean` does not change the units. For example, if the chart units is `requests/sec`, the result
+will be again expressed in the same units.
+
+It can also be used in APIs and badges as `&group=trimmed-mean` in the URL and the additional parameter `group_options`
+may be used to request any percentage (e.g. `&group=trimmed-mean&group_options=29`).
+
+## Examples
+
+Examining the last minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=trimmed-mean5&after=-60&label=trimmed-mean5&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Truncated_mean>.
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.c b/web/api/queries/trimmed_mean/trimmed_mean.c
new file mode 100644
index 0000000..2277208
--- /dev/null
+++ b/web/api/queries/trimmed_mean/trimmed_mean.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "trimmed_mean.h"
+
+// ----------------------------------------------------------------------------
+// trimmed mean
+
+struct grouping_trimmed_mean {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static void grouping_create_trimmed_mean_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_trimmed_mean));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
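+    // convert "trim X% from each tail" into the fraction of sorted values to
+    // keep, e.g. a 5% trim becomes 0.90 (keep the middle 90% of the points)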
+ g->percent = 1.0 - ((g->percent / 100.0) * 2.0);
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_trimmed_mean1(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 1.0);
+}
+void grouping_create_trimmed_mean2(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 2.0);
+}
+void grouping_create_trimmed_mean3(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 3.0);
+}
+void grouping_create_trimmed_mean5(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 5.0);
+}
+void grouping_create_trimmed_mean10(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 10.0);
+}
+void grouping_create_trimmed_mean15(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 15.0);
+}
+void grouping_create_trimmed_mean20(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 20.0);
+}
+void grouping_create_trimmed_mean25(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 25.0);
+}
+
+// resets when the query switches dimensions
+// so, clear everything to restart
+void grouping_reset_trimmed_mean(RRDR *r) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_trimmed_mean(RRDR *r) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1 - (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "trimmed_mean");
+
+ g->next_pos = 0;
+
+ return value;
+}
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/web/api/queries/trimmed_mean/trimmed_mean.h
new file mode 100644
index 0000000..e66d925
--- /dev/null
+++ b/web/api/queries/trimmed_mean/trimmed_mean.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_TRIMMED_MEAN_H
+#define NETDATA_API_QUERIES_TRIMMED_MEAN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+void grouping_create_trimmed_mean1(RRDR *r, const char *options);
+void grouping_create_trimmed_mean2(RRDR *r, const char *options);
+void grouping_create_trimmed_mean3(RRDR *r, const char *options);
+void grouping_create_trimmed_mean5(RRDR *r, const char *options);
+void grouping_create_trimmed_mean10(RRDR *r, const char *options);
+void grouping_create_trimmed_mean15(RRDR *r, const char *options);
+void grouping_create_trimmed_mean20(RRDR *r, const char *options);
+void grouping_create_trimmed_mean25(RRDR *r, const char *options);
+void grouping_reset_trimmed_mean(RRDR *r);
+void grouping_free_trimmed_mean(RRDR *r);
+void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_TRIMMED_MEAN_H
diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c
new file mode 100644
index 0000000..a9555a6
--- /dev/null
+++ b/web/api/queries/weights.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "daemon/common.h"
+#include "database/KolmogorovSmirnovDist.h"
+
+#define MAX_POINTS 10000
+int enable_metric_correlations = CONFIG_BOOLEAN_YES;
+int metric_correlations_version = 1;
+WEIGHTS_METHOD default_metric_correlations_method = WEIGHTS_METHOD_MC_KS2;
+
+typedef struct weights_stats {
+ NETDATA_DOUBLE max_base_high_ratio;
+ size_t db_points;
+ size_t result_points;
+ size_t db_queries;
+ size_t db_points_per_tier[RRD_STORAGE_TIERS];
+ size_t binary_searches;
+} WEIGHTS_STATS;
+
+// ----------------------------------------------------------------------------
+// parse and render metric correlations methods
+
+static struct {
+ const char *name;
+ WEIGHTS_METHOD value;
+} weights_methods[] = {
+ { "ks2" , WEIGHTS_METHOD_MC_KS2}
+ , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
+ , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
+ , { NULL , 0 }
+};
+
+WEIGHTS_METHOD weights_string_to_method(const char *method) {
+ for(int i = 0; weights_methods[i].name ;i++)
+ if(strcmp(method, weights_methods[i].name) == 0)
+ return weights_methods[i].value;
+
+ return default_metric_correlations_method;
+}
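+
+// e.g. weights_string_to_method("volume") returns WEIGHTS_METHOD_MC_VOLUME;
+// any unrecognized name falls back to the default method (ks2)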
+
+const char *weights_method_to_string(WEIGHTS_METHOD method) {
+ for(int i = 0; weights_methods[i].name ;i++)
+ if(weights_methods[i].value == method)
+ return weights_methods[i].name;
+
+ return "unknown";
+}
+
+// ----------------------------------------------------------------------------
+// The results per dimension are aggregated into a dictionary
+
+typedef enum {
+ RESULT_IS_BASE_HIGH_RATIO = (1 << 0),
+ RESULT_IS_PERCENTAGE_OF_TIME = (1 << 1),
+} RESULT_FLAGS;
+
+struct register_result {
+ RESULT_FLAGS flags;
+ RRDCONTEXT_ACQUIRED *rca;
+ RRDINSTANCE_ACQUIRED *ria;
+ RRDMETRIC_ACQUIRED *rma;
+ NETDATA_DOUBLE value;
+};
+
+static DICTIONARY *register_result_init() {
+ DICTIONARY *results = dictionary_create(DICT_OPTION_SINGLE_THREADED);
+ return results;
+}
+
+static void register_result_destroy(DICTIONARY *results) {
+ dictionary_destroy(results);
+}
+
+static void register_result(DICTIONARY *results,
+ RRDCONTEXT_ACQUIRED *rca,
+ RRDINSTANCE_ACQUIRED *ria,
+ RRDMETRIC_ACQUIRED *rma,
+ NETDATA_DOUBLE value,
+ RESULT_FLAGS flags,
+ WEIGHTS_STATS *stats,
+ bool register_zero) {
+
+ if(!netdata_double_isnumber(value)) return;
+
+ // make it positive
+ NETDATA_DOUBLE v = fabsndd(value);
+
+ // no need to store zero scored values
+ if(unlikely(fpclassify(v) == FP_ZERO && !register_zero))
+ return;
+
+ // keep track of the max of the baseline / highlight ratio
+ if(flags & RESULT_IS_BASE_HIGH_RATIO && v > stats->max_base_high_ratio)
+ stats->max_base_high_ratio = v;
+
+ struct register_result t = {
+ .flags = flags,
+ .rca = rca,
+ .ria = ria,
+ .rma = rma,
+ .value = v
+ };
+
+    // we can use the pointer address of the RMA as a unique key for each metric
+ char buf[20 + 1];
+ ssize_t len = snprintfz(buf, 20, "%p", rma);
+ dictionary_set_advanced(results, buf, len + 1, &t, sizeof(struct register_result), NULL);
+}
+
+// ----------------------------------------------------------------------------
+// Generation of JSON output for the results
+
+static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *wb,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions __maybe_unused, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ buffer_sprintf(wb, "{\n"
+ "\t\"after\": %lld,\n"
+ "\t\"before\": %lld,\n"
+ "\t\"duration\": %lld,\n"
+ "\t\"points\": %zu,\n",
+ (long long)after,
+ (long long)before,
+ (long long)(before - after),
+ points
+ );
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME)
+ buffer_sprintf(wb, ""
+ "\t\"baseline_after\": %lld,\n"
+ "\t\"baseline_before\": %lld,\n"
+ "\t\"baseline_duration\": %lld,\n"
+ "\t\"baseline_points\": %zu,\n",
+ (long long)baseline_after,
+ (long long)baseline_before,
+ (long long)(baseline_before - baseline_after),
+ points << shifts
+ );
+
+ buffer_sprintf(wb, ""
+ "\t\"statistics\": {\n"
+ "\t\t\"query_time_ms\": %f,\n"
+ "\t\t\"db_queries\": %zu,\n"
+ "\t\t\"query_result_points\": %zu,\n"
+ "\t\t\"binary_searches\": %zu,\n"
+ "\t\t\"db_points_read\": %zu,\n"
+ "\t\t\"db_points_per_tier\": [ ",
+ (double)duration / (double)USEC_PER_MS,
+ stats->db_queries,
+ stats->result_points,
+ stats->binary_searches,
+ stats->db_points
+ );
+
+ for(size_t tier = 0; tier < storage_tiers ;tier++)
+ buffer_sprintf(wb, "%s%zu", tier?", ":"", stats->db_points_per_tier[tier]);
+
+ buffer_sprintf(wb, " ]\n"
+ "\t},\n"
+ "\t\"group\": \"%s\",\n"
+ "\t\"method\": \"%s\",\n"
+ "\t\"options\": \"",
+ web_client_api_request_v1_data_group_to_string(group),
+ weights_method_to_string(method)
+ );
+
+ web_client_api_request_v1_data_options_to_buffer(wb, options);
+}
+
+static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions, duration, stats);
+
+ buffer_strcat(wb, "\",\n\t\"correlated_charts\": {\n");
+
+ size_t charts = 0, chart_dims = 0, total_dimensions = 0;
+ struct register_result *t;
+ RRDINSTANCE_ACQUIRED *last_ria = NULL; // never access this - we use it only for comparison
+ dfe_start_read(results, t) {
+ if(t->ria != last_ria) {
+ last_ria = t->ria;
+
+ if(charts) buffer_strcat(wb, "\n\t\t\t}\n\t\t},\n");
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\"context\": \"");
+ buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
+ buffer_strcat(wb, "\",\n\t\t\t\"dimensions\": {\n");
+ charts++;
+ chart_dims = 0;
+ }
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
+ chart_dims++;
+ total_dimensions++;
+ }
+ dfe_done(t);
+
+ // close dimensions and chart
+ if (total_dimensions)
+ buffer_strcat(wb, "\n\t\t\t}\n\t\t}\n");
+
+ // close correlated_charts
+ buffer_sprintf(wb, "\t},\n"
+ "\t\"correlated_dimensions\": %zu,\n"
+ "\t\"total_dimensions_count\": %zu\n"
+ "}\n",
+ total_dimensions,
+ examined_dimensions
+ );
+
+ return total_dimensions;
+}
+
+static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *wb,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions, duration, stats);
+
+ buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+
+ size_t contexts = 0, charts = 0, total_dimensions = 0, context_dims = 0, chart_dims = 0;
+ NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
+ struct register_result *t;
+ RRDCONTEXT_ACQUIRED *last_rca = NULL;
+ RRDINSTANCE_ACQUIRED *last_ria = NULL;
+ dfe_start_read(results, t) {
+
+ if(t->rca != last_rca) {
+ last_rca = t->rca;
+
+ if(contexts)
+ buffer_sprintf(wb, "\n"
+ "\t\t\t\t\t},\n"
+ "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t\t\t}\n\t\t\t},\n"
+ "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n"
+ , charts_total_weight / (double)chart_dims
+ , contexts_total_weight / (double)context_dims);
+
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
+ buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
+
+ contexts++;
+ charts = 0;
+ context_dims = 0;
+ contexts_total_weight = 0.0;
+
+ last_ria = NULL;
+ }
+
+ if(t->ria != last_ria) {
+ last_ria = t->ria;
+
+ if(charts)
+ buffer_sprintf(wb, "\n"
+ "\t\t\t\t\t},\n"
+ "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t\t\t},\n"
+ , charts_total_weight / (double)chart_dims);
+
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
+
+ charts++;
+ chart_dims = 0;
+ charts_total_weight = 0.0;
+ }
+
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
+ charts_total_weight += t->value;
+ contexts_total_weight += t->value;
+ chart_dims++;
+ context_dims++;
+ total_dimensions++;
+ }
+ dfe_done(t);
+
+ // close dimensions and chart
+ if (total_dimensions)
+ buffer_sprintf(wb, "\n"
+ "\t\t\t\t\t},\n"
+ "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t\t\t}\n"
+ "\t\t\t},\n"
+ "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t}\n"
+ , charts_total_weight / (double)chart_dims
+ , contexts_total_weight / (double)context_dims);
+
+    // close contexts
+ buffer_sprintf(wb, "\t},\n"
+ "\t\"weighted_dimensions\": %zu,\n"
+ "\t\"total_dimensions_count\": %zu\n"
+ "}\n",
+ total_dimensions,
+ examined_dimensions
+ );
+
+ return total_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// KS2 algorithm functions
+
+typedef long int DIFFS_NUMBERS;
+#define DOUBLE_TO_INT_MULTIPLIER 100000
+
+static inline int binary_search_bigger_than(const DIFFS_NUMBERS arr[], int left, int size, DIFFS_NUMBERS K) {
+    // binary search to find the index of the
+    // first value in the array that is greater than K
+
+ int right = size;
+ while(left < right) {
+ int middle = (int)(((unsigned int)(left + right)) >> 1);
+
+ if(arr[middle] > K)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ return left;
+}
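+
+// e.g. for arr = { 10, 20, 30 } and size = 3:
+//   binary_search_bigger_than(arr, 0, 3, 15) == 1 (arr[1] = 20 is the first value > 15)
+//   binary_search_bigger_than(arr, 0, 3, 30) == 3 (nothing is greater, so size is returned)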
+
+int compare_diffs(const void *left, const void *right) {
+ DIFFS_NUMBERS lt = *(DIFFS_NUMBERS *)left;
+ DIFFS_NUMBERS rt = *(DIFFS_NUMBERS *)right;
+
+ // https://stackoverflow.com/a/3886497/1114110
+ return (lt > rt) - (lt < rt);
+}
+
+static size_t calculate_pairs_diff(DIFFS_NUMBERS *diffs, NETDATA_DOUBLE *arr, size_t size) {
+ NETDATA_DOUBLE *last = &arr[size - 1];
+ size_t added = 0;
+
+ while(last > arr) {
+ NETDATA_DOUBLE second = *last--;
+ NETDATA_DOUBLE first = *last;
+ *diffs++ = (DIFFS_NUMBERS)((first - second) * (NETDATA_DOUBLE)DOUBLE_TO_INT_MULTIPLIER);
+ added++;
+ }
+
+ return added;
+}
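+
+// e.g. for arr = { 5.0, 3.0, 4.0 } the function walks backwards and emits the
+// two scaled consecutive differences { 3.0 - 4.0, 5.0 - 3.0 } * 100000, i.e.
+// { -100000, 200000 } (one diff less than the number of input points)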
+
+static double ks_2samp(
+ DIFFS_NUMBERS baseline_diffs[], int base_size,
+ DIFFS_NUMBERS highlight_diffs[], int high_size,
+ uint32_t base_shifts) {
+
+ qsort(baseline_diffs, base_size, sizeof(DIFFS_NUMBERS), compare_diffs);
+ qsort(highlight_diffs, high_size, sizeof(DIFFS_NUMBERS), compare_diffs);
+
+ // Now we should be calculating this:
+ //
+ // For each number in the diffs arrays, we should find the index of the
+ // number bigger than them in both arrays and calculate the % of this index
+ // vs the total array size. Once we have the 2 percentages, we should find
+ // the min and max across the delta of all of them.
+ //
+ // It should look like this:
+ //
+ // base_pcent = binary_search_bigger_than(...) / base_size;
+ // high_pcent = binary_search_bigger_than(...) / high_size;
+ // delta = base_pcent - high_pcent;
+ // if(delta < min) min = delta;
+ // if(delta > max) max = delta;
+ //
+ // This would require a lot of multiplications and divisions.
+ //
+    // To speed it up, we do the binary search to find the index of each number,
+    // but then we shift the high index left by base_shifts (the power of two by
+    // which the baseline sample is bigger). So the 2 indexes are now comparable.
+ // We also keep track of the original indexes with min and max, to properly
+ // calculate their percentages once the loops finish.
+
+
+ // initialize min and max using the first number of baseline_diffs
+ DIFFS_NUMBERS K = baseline_diffs[0];
+ int base_idx = binary_search_bigger_than(baseline_diffs, 1, base_size, K);
+ int high_idx = binary_search_bigger_than(highlight_diffs, 0, high_size, K);
+ int delta = base_idx - (high_idx << base_shifts);
+ int min = delta, max = delta;
+ int base_min_idx = base_idx;
+ int base_max_idx = base_idx;
+ int high_min_idx = high_idx;
+ int high_max_idx = high_idx;
+
+ // do the baseline_diffs starting from 1 (we did position 0 above)
+ for(int i = 1; i < base_size; i++) {
+ K = baseline_diffs[i];
+        base_idx = binary_search_bigger_than(baseline_diffs, i + 1, base_size, K); // starting from i + 1, since baseline_diffs is sorted
+ high_idx = binary_search_bigger_than(highlight_diffs, 0, high_size, K);
+
+ delta = base_idx - (high_idx << base_shifts);
+ if(delta < min) {
+ min = delta;
+ base_min_idx = base_idx;
+ high_min_idx = high_idx;
+ }
+ else if(delta > max) {
+ max = delta;
+ base_max_idx = base_idx;
+ high_max_idx = high_idx;
+ }
+ }
+
+ // do the highlight_diffs starting from 0
+ for(int i = 0; i < high_size; i++) {
+ K = highlight_diffs[i];
+ base_idx = binary_search_bigger_than(baseline_diffs, 0, base_size, K);
+        high_idx = binary_search_bigger_than(highlight_diffs, i + 1, high_size, K); // starting from i + 1, since highlight_diffs is sorted
+
+ delta = base_idx - (high_idx << base_shifts);
+ if(delta < min) {
+ min = delta;
+ base_min_idx = base_idx;
+ high_min_idx = high_idx;
+ }
+ else if(delta > max) {
+ max = delta;
+ base_max_idx = base_idx;
+ high_max_idx = high_idx;
+ }
+ }
+
+ // now we have the min, max and their indexes
+ // properly calculate min and max as dmin and dmax
+ double dbase_size = (double)base_size;
+ double dhigh_size = (double)high_size;
+ double dmin = ((double)base_min_idx / dbase_size) - ((double)high_min_idx / dhigh_size);
+ double dmax = ((double)base_max_idx / dbase_size) - ((double)high_max_idx / dhigh_size);
+
+ dmin = -dmin;
+ if(islessequal(dmin, 0.0)) dmin = 0.0;
+ else if(isgreaterequal(dmin, 1.0)) dmin = 1.0;
+
+ double d;
+ if(isgreaterequal(dmin, dmax)) d = dmin;
+ else d = dmax;
+
+ double en = round(dbase_size * dhigh_size / (dbase_size + dhigh_size));
+
+ // under these conditions, KSfbar() crashes
+ if(unlikely(isnan(en) || isinf(en) || en == 0.0 || isnan(d) || isinf(d)))
+ return NAN;
+
+ return KSfbar((int)en, d);
+}
+
+static double kstwo(
+ NETDATA_DOUBLE baseline[], int baseline_points,
+ NETDATA_DOUBLE highlight[], int highlight_points,
+ uint32_t base_shifts) {
+
+    // -1 in size, since calculate_pairs_diff() returns one point less than its input
+ DIFFS_NUMBERS baseline_diffs[baseline_points - 1];
+ DIFFS_NUMBERS highlight_diffs[highlight_points - 1];
+
+ int base_size = (int)calculate_pairs_diff(baseline_diffs, baseline, baseline_points);
+ int high_size = (int)calculate_pairs_diff(highlight_diffs, highlight, highlight_points);
+
+ if(unlikely(!base_size || !high_size))
+ return NAN;
+
+ if(unlikely(base_size != baseline_points - 1 || high_size != highlight_points - 1)) {
+ error("Metric correlations: internal error - calculate_pairs_diff() returns the wrong number of entries");
+ return NAN;
+ }
+
+ return ks_2samp(baseline_diffs, base_size, highlight_diffs, high_size, base_shifts);
+}
+
+NETDATA_DOUBLE *rrd2rrdr_ks2(
+ ONEWAYALLOC *owa, RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ time_t after, time_t before, size_t points, RRDR_OPTIONS options,
+ RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ WEIGHTS_STATS *stats,
+ size_t *entries
+ ) {
+
+ NETDATA_DOUBLE *ret = NULL;
+
+ QUERY_TARGET_REQUEST qtr = {
+ .host = host,
+ .rca = rca,
+ .ria = ria,
+ .rma = rma,
+ .after = after,
+ .before = before,
+ .points = points,
+ .options = options,
+ .group_method = group_method,
+ .group_options = group_options,
+ .tier = tier,
+ .query_source = QUERY_SOURCE_API_WEIGHTS,
+ };
+
+ RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+ if(!r)
+ goto cleanup;
+
+ stats->db_queries++;
+ stats->result_points += r->internal.result_points_generated;
+ stats->db_points += r->internal.db_points_read;
+ for(size_t tr = 0; tr < storage_tiers ; tr++)
+ stats->db_points_per_tier[tr] += r->internal.tier_points_read[tr];
+
+ if(r->d != 1) {
+ error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu", r->internal.qt->id, r->d);
+ goto cleanup;
+ }
+
+ if(unlikely(r->od[0] & RRDR_DIMENSION_HIDDEN))
+ goto cleanup;
+
+ if(unlikely(!(r->od[0] & RRDR_DIMENSION_NONZERO)))
+ goto cleanup;
+
+ if(rrdr_rows(r) < 2)
+ goto cleanup;
+
+ *entries = rrdr_rows(r);
+ ret = onewayalloc_mallocz(owa, sizeof(NETDATA_DOUBLE) * rrdr_rows(r));
+
+ // copy the points of the dimension to a contiguous array
+ // there is no need to check for empty values, since empty values are already zero
+ // https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
+ memcpy(ret, r->v, rrdr_rows(r) * sizeof(NETDATA_DOUBLE));
+
+cleanup:
+ rrdr_free(owa, r);
+ return ret;
+}
+
+static void rrdset_metric_correlations_ks2(
+ RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ DICTIONARY *results,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ size_t points, RRDR_OPTIONS options,
+ RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ uint32_t shifts,
+ WEIGHTS_STATS *stats, bool register_zero
+ ) {
+
+ options |= RRDR_OPTION_NATURAL_POINTS;
+
+ ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
+
+ size_t high_points = 0;
+ NETDATA_DOUBLE *highlight = rrd2rrdr_ks2(
+ owa, host, rca, ria, rma, after, before, points,
+ options, group_method, group_options, tier, stats, &high_points);
+
+ if(!highlight)
+ goto cleanup;
+
+ size_t base_points = 0;
+ NETDATA_DOUBLE *baseline = rrd2rrdr_ks2(
+ owa, host, rca, ria, rma, baseline_after, baseline_before, high_points << shifts,
+ options, group_method, group_options, tier, stats, &base_points);
+
+ if(!baseline)
+ goto cleanup;
+
+ stats->binary_searches += 2 * (base_points - 1) + 2 * (high_points - 1);
+
+ double prob = kstwo(baseline, (int)base_points, highlight, (int)high_points, shifts);
+ if(!isnan(prob) && !isinf(prob)) {
+
+        // these conditions should never happen, but let's check anyway
+ if(unlikely(prob < 0.0)) {
+ error("Metric correlations: kstwo() returned a negative number: %f", prob);
+ prob = -prob;
+ }
+ if(unlikely(prob > 1.0)) {
+ error("Metric correlations: kstwo() returned a number above 1.0: %f", prob);
+ prob = 1.0;
+ }
+
+        // to spread the results evenly, 0.0 needs to be the least correlated and 1.0 the most correlated
+ // so, we flip the result of kstwo()
+ register_result(results, rca, ria, rma, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
+ }
+
+cleanup:
+ onewayalloc_destroy(owa);
+}
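+
+// Note: the baseline window above is queried at (high_points << shifts)
+// points, so with shifts = 2 and the default 500-point highlight window the
+// baseline is fetched at 2000 points - the power-of-two ratio between the
+// two samples that ks_2samp() expects.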
+
+// ----------------------------------------------------------------------------
+// VOLUME algorithm functions
+
+static void merge_query_value_to_stats(QUERY_VALUE *qv, WEIGHTS_STATS *stats) {
+ stats->db_queries++;
+ stats->result_points += qv->result_points;
+ stats->db_points += qv->points_read;
+ for(size_t tier = 0; tier < storage_tiers ; tier++)
+ stats->db_points_per_tier[tier] += qv->storage_points_per_tier[tier];
+}
+
+static void rrdset_metric_correlations_volume(
+ RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ DICTIONARY *results,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier,
+ WEIGHTS_STATS *stats, bool register_zero) {
+
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ABSOLUTE | RRDR_OPTION_NATURAL_POINTS;
+
+ QUERY_VALUE baseline_average = rrdmetric2value(host, rca, ria, rma, baseline_after, baseline_before, options, group_method, group_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&baseline_average, stats);
+
+ if(!netdata_double_isnumber(baseline_average.value)) {
+ // this means no data for the baseline window, but we may have data for the highlighted one - assume zero
+ baseline_average.value = 0.0;
+ }
+
+ QUERY_VALUE highlight_average = rrdmetric2value(host, rca, ria, rma, after, before, options, group_method, group_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&highlight_average, stats);
+
+ if(!netdata_double_isnumber(highlight_average.value))
+ return;
+
+ if(baseline_average.value == highlight_average.value) {
+ // they are the same - let's move on
+ return;
+ }
+
+ char highlight_countif_options[50 + 1];
+ snprintfz(highlight_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average.value < baseline_average.value ? "<" : ">", baseline_average.value);
+ QUERY_VALUE highlight_countif = rrdmetric2value(host, rca, ria, rma, after, before, options, RRDR_GROUPING_COUNTIF, highlight_countif_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&highlight_countif, stats);
+
+ if(!netdata_double_isnumber(highlight_countif.value)) {
+ info("WEIGHTS: highlighted countif query failed, but highlighted average worked - strange...");
+ return;
+ }
+
+ // this represents the percentage of time
+ // the highlighted window was above/below the baseline window
+ // (above or below depending on their averages)
+ highlight_countif.value = highlight_countif.value / 100.0; // countif returns 0 - 100.0
+
+ RESULT_FLAGS flags;
+ NETDATA_DOUBLE pcent = NAN;
+ if(isgreater(baseline_average.value, 0.0) || isless(baseline_average.value, 0.0)) {
+ flags = RESULT_IS_BASE_HIGH_RATIO;
+ pcent = (highlight_average.value - baseline_average.value) / baseline_average.value * highlight_countif.value;
+ }
+ else {
+ flags = RESULT_IS_PERCENTAGE_OF_TIME;
+ pcent = highlight_countif.value;
+ }
+
+ register_result(results, rca, ria, rma, pcent, flags, stats, register_zero);
+}
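+
+// Illustrative arithmetic (hypothetical values): with a baseline average of
+// 10, a highlight average of 15, and countif reporting that 80% of the
+// highlight points are above the baseline average, the registered weight is
+// (15 - 10) / 10 * 0.8 = 0.4, flagged RESULT_IS_BASE_HIGH_RATIO.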
+
+// ----------------------------------------------------------------------------
+// ANOMALY RATE algorithm functions
+
+static void rrdset_weights_anomaly_rate(
+ RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ DICTIONARY *results,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier,
+ WEIGHTS_STATS *stats, bool register_zero) {
+
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ANOMALY_BIT | RRDR_OPTION_NATURAL_POINTS;
+
+ QUERY_VALUE qv = rrdmetric2value(host, rca, ria, rma, after, before, options, group_method, group_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&qv, stats);
+
+ if(netdata_double_isnumber(qv.value))
+ register_result(results, rca, ria, rma, qv.value, 0, stats, register_zero);
+}
+
+// ----------------------------------------------------------------------------
+
+int compare_netdata_doubles(const void *left, const void *right) {
+ NETDATA_DOUBLE lt = *(NETDATA_DOUBLE *)left;
+ NETDATA_DOUBLE rt = *(NETDATA_DOUBLE *)right;
+
+ // https://stackoverflow.com/a/3886497/1114110
+ return (lt > rt) - (lt < rt);
+}
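+
+// e.g. for a = 1.5 and b = 2.0, compare_netdata_doubles(&a, &b) returns -1;
+// the (lt > rt) - (lt < rt) idiom yields -1/0/+1 without the truncation
+// problems of the naive (int)(lt - rt) shortcut.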
+
+static inline int binary_search_bigger_than_netdata_double(const NETDATA_DOUBLE arr[], int left, int size, NETDATA_DOUBLE K) {
+    // binary search to find the smallest index
+    // of the first value in the array that is greater than K
+
+ int right = size;
+ while(left < right) {
+ int middle = (int)(((unsigned int)(left + right)) >> 1);
+
+ if(arr[middle] > K)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ return left;
+}
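+
+// A short trace (illustrative values): for arr = { 1.0, 3.0, 5.0 }, size = 3
+// and K = 3.0 the search returns 2, the index of the first element strictly
+// greater than K; if no element is greater than K it returns size
+// (one past the last index).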
+
+// ----------------------------------------------------------------------------
+// spread the results evenly according to their value
+
+static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
+ struct register_result *t;
+
+ // count the dimensions
+ size_t dimensions = dictionary_entries(results);
+ if(!dimensions) return 0;
+
+ if(stats->max_base_high_ratio == 0.0)
+ stats->max_base_high_ratio = 1.0;
+
+ // create an array of the right size and copy all the values in it
+ NETDATA_DOUBLE slots[dimensions];
+ dimensions = 0;
+ dfe_start_read(results, t) {
+ if(t->flags & (RESULT_IS_PERCENTAGE_OF_TIME))
+ t->value = t->value * stats->max_base_high_ratio;
+
+ slots[dimensions++] = t->value;
+ }
+ dfe_done(t);
+
+ // sort the array with the values of all dimensions
+ qsort(slots, dimensions, sizeof(NETDATA_DOUBLE), compare_netdata_doubles);
+
+ // skip the duplicates in the sorted array
+ NETDATA_DOUBLE last_value = NAN;
+ size_t unique_values = 0;
+ for(size_t i = 0; i < dimensions ;i++) {
+ if(likely(slots[i] != last_value))
+ slots[unique_values++] = last_value = slots[i];
+ }
+
+ // this cannot happen, but coverity thinks otherwise...
+ if(!unique_values)
+ unique_values = dimensions;
+
+ // calculate the weight of each slot, using the number of unique values
+ NETDATA_DOUBLE slot_weight = 1.0 / (NETDATA_DOUBLE)unique_values;
+
+ dfe_start_read(results, t) {
+ int slot = binary_search_bigger_than_netdata_double(slots, 0, (int)unique_values, t->value);
+ NETDATA_DOUBLE v = slot * slot_weight;
+ if(unlikely(v > 1.0)) v = 1.0;
+ v = 1.0 - v;
+ t->value = v;
+ }
+ dfe_done(t);
+
+ return dimensions;
+}
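+
+// Worked example (hypothetical values): for registered values
+// { 0.2, 0.2, 0.5, 0.9 }, the sorted unique slots are { 0.2, 0.5, 0.9 } and
+// slot_weight = 1/3; 0.2 maps to slot 1 giving 1 - 1/3 = 0.667, 0.5 to
+// slot 2 giving 0.333, and 0.9 to slot 3 giving 0.0 - equal inputs always
+// share the same evenly spaced output value.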
+
+// ----------------------------------------------------------------------------
+// The main function
+
+int web_api_v1_weights(
+ RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout) {
+
+ WEIGHTS_STATS stats = {};
+
+ DICTIONARY *results = register_result_init();
+ DICTIONARY *metrics = NULL;
+ char *error = NULL;
+ int resp = HTTP_RESP_OK;
+
+ // if the user didn't give a timeout
+ // assume 60 seconds
+ if(!timeout)
+ timeout = 60 * MSEC_PER_SEC;
+
+ // if the timeout is less than 1 second
+ // make it at least 1 second
+ if(timeout < (long)(1 * MSEC_PER_SEC))
+ timeout = 1 * MSEC_PER_SEC;
+
+ usec_t timeout_usec = timeout * USEC_PER_MS;
+ usec_t started_usec = now_realtime_usec();
+
+ if(!rrdr_relative_window_to_absolute(&after, &before))
+ buffer_no_cacheable(wb);
+
+ if (before <= after) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Invalid selected time-range.";
+ goto cleanup;
+ }
+
+ uint32_t shifts = 0;
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ if(!points) points = 500;
+
+ if(baseline_before <= API_RELATIVE_TIME_MAX)
+ baseline_before += after;
+
+ rrdr_relative_window_to_absolute(&baseline_after, &baseline_before);
+
+ if (baseline_before <= baseline_after) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Invalid baseline time-range.";
+ goto cleanup;
+ }
+
+ // baseline should be a power of two multiple of highlight
+ long long base_delta = baseline_before - baseline_after;
+ long long high_delta = before - after;
+ uint32_t multiplier = (uint32_t)round((double)base_delta / (double)high_delta);
+
+ // check if the multiplier is a power of two
+ // https://stackoverflow.com/a/600306/1114110
+ if((multiplier & (multiplier - 1)) != 0) {
+            // it is not a power of two
+            // round it up to the next power of two
+            // https://stackoverflow.com/a/466242/1114110
+ multiplier--;
+ multiplier |= multiplier >> 1;
+ multiplier |= multiplier >> 2;
+ multiplier |= multiplier >> 4;
+ multiplier |= multiplier >> 8;
+ multiplier |= multiplier >> 16;
+ multiplier++;
+ }
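+
+        // e.g. a 5x baseline-to-highlight ratio becomes: 5 -> 4 (decrement)
+        // -> 7 (after the or-shift cascade) -> 8 (increment), i.e. the next
+        // power of two, which the loop below turns into shifts = 3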
+
+        // convert the multiplier to the number of shifts
+        // we need to apply to divide the baseline sample
+        // counts down to match the highlight ones
+ while(multiplier > 1) {
+ shifts++;
+ multiplier = multiplier >> 1;
+ }
+
+        // if the baseline size does not comply with MAX_POINTS
+        // lower the window of the baseline
+ while(shifts && (points << shifts) > MAX_POINTS)
+ shifts--;
+
+        // if the baseline size still does not comply with MAX_POINTS
+        // lower the resolution of both the highlight and the baseline
+ while((points << shifts) > MAX_POINTS)
+ points = points >> 1;
+
+ if(points < 15) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Too few points available, at least 15 are needed.";
+ goto cleanup;
+ }
+
+ // adjust the baseline to be multiplier times bigger than the highlight
+ baseline_after = baseline_before - (high_delta << shifts);
+ }
+
+ size_t examined_dimensions = 0;
+
+ bool register_zero = true;
+ if(options & RRDR_OPTION_NONZERO) {
+ register_zero = false;
+ options &= ~RRDR_OPTION_NONZERO;
+ }
+
+ metrics = rrdcontext_all_metrics_to_dict(host, contexts);
+ struct metric_entry *me;
+
+ // for every metric_entry in the dictionary
+ dfe_start_read(metrics, me) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout_usec) {
+ error = "timed out";
+ resp = HTTP_RESP_GATEWAY_TIMEOUT;
+ goto cleanup;
+ }
+
+ examined_dimensions++;
+
+ switch(method) {
+ case WEIGHTS_METHOD_ANOMALY_RATE:
+ options |= RRDR_OPTION_ANOMALY_BIT;
+ rrdset_weights_anomaly_rate(
+ host,
+ me->rca, me->ria, me->rma,
+ results,
+ after, before,
+ options, group, group_options, tier,
+ &stats, register_zero
+ );
+ break;
+
+ case WEIGHTS_METHOD_MC_VOLUME:
+ rrdset_metric_correlations_volume(
+ host,
+ me->rca, me->ria, me->rma,
+ results,
+ baseline_after, baseline_before,
+ after, before,
+ options, group, group_options, tier,
+ &stats, register_zero
+ );
+ break;
+
+ default:
+ case WEIGHTS_METHOD_MC_KS2:
+ rrdset_metric_correlations_ks2(
+ host,
+ me->rca, me->ria, me->rma,
+ results,
+ baseline_after, baseline_before,
+ after, before, points,
+ options, group, group_options, tier, shifts,
+ &stats, register_zero
+ );
+ break;
+ }
+ }
+ dfe_done(me);
+
+ if(!register_zero)
+ options |= RRDR_OPTION_NONZERO;
+
+ if(!(options & RRDR_OPTION_RETURN_RAW))
+ spread_results_evenly(results, &stats);
+
+ usec_t ended_usec = now_realtime_usec();
+
+ // generate the json output we need
+ buffer_flush(wb);
+
+ size_t added_dimensions = 0;
+ switch(format) {
+ case WEIGHTS_FORMAT_CHARTS:
+ added_dimensions =
+ registered_results_to_json_charts(
+ results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
+ break;
+
+ default:
+ case WEIGHTS_FORMAT_CONTEXTS:
+ added_dimensions =
+ registered_results_to_json_contexts(
+ results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
+ break;
+ }
+
+ if(!added_dimensions) {
+ error = "no results produced.";
+ resp = HTTP_RESP_NOT_FOUND;
+ }
+
+cleanup:
+ if(metrics) dictionary_destroy(metrics);
+ if(results) register_result_destroy(results);
+
+ if(error) {
+ buffer_flush(wb);
+ buffer_sprintf(wb, "{\"error\": \"%s\" }", error);
+ }
+
+ return resp;
+}
+
+// ----------------------------------------------------------------------------
+// unittest
+
+/*
+
+Unit tests against the output of this:
+
+https://github.com/scipy/scipy/blob/4cf21e753cf937d1c6c2d2a0e372fbc1dbbeea81/scipy/stats/_stats_py.py#L7275-L7449
+
+import matplotlib.pyplot as plt
+import pandas as pd
+import numpy as np
+import scipy as sp
+from scipy import stats
+
+data1 = np.array([ 1111, -2222, 33, 100, 100, 15555, -1, 19999, 888, 755, -1, -730 ])
+data2 = np.array([365, -123, 0])
+data1 = np.sort(data1)
+data2 = np.sort(data2)
+n1 = data1.shape[0]
+n2 = data2.shape[0]
+data_all = np.concatenate([data1, data2])
+cdf1 = np.searchsorted(data1, data_all, side='right') / n1
+cdf2 = np.searchsorted(data2, data_all, side='right') / n2
+print(data_all)
+print("\ndata1", data1, cdf1)
+print("\ndata2", data2, cdf2)
+cddiffs = cdf1 - cdf2
+print("\ncddiffs", cddiffs)
+minS = np.clip(-np.min(cddiffs), 0, 1)
+maxS = np.max(cddiffs)
+print("\nmin", minS)
+print("max", maxS)
+m, n = sorted([float(n1), float(n2)], reverse=True)
+en = m * n / (m + n)
+d = max(minS, maxS)
+prob = stats.distributions.kstwo.sf(d, np.round(en))
+print("\nprob", prob)
+
+*/
+
+static int double_expect(double v, const char *str, const char *descr) {
+ char buf[100 + 1];
+ snprintfz(buf, 100, "%0.6f", v);
+ int ret = strcmp(buf, str) ? 1 : 0;
+
+ fprintf(stderr, "%s %s, expected %s, got %s\n", ret?"FAILED":"OK", descr, str, buf);
+ return ret;
+}
+
+static int mc_unittest1(void) {
+ int bs = 3, hs = 3;
+ DIFFS_NUMBERS base[3] = { 1, 2, 3 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 0);
+ return double_expect(prob, "0.222222", "3x3");
+}
+
+static int mc_unittest2(void) {
+ int bs = 6, hs = 3;
+ DIFFS_NUMBERS base[6] = { 1, 2, 3, 10, 10, 15 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 1);
+ return double_expect(prob, "0.500000", "6x3");
+}
+
+static int mc_unittest3(void) {
+ int bs = 12, hs = 3;
+ DIFFS_NUMBERS base[12] = { 1, 2, 3, 10, 10, 15, 111, 19999, 8, 55, -1, -73 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 2);
+ return double_expect(prob, "0.347222", "12x3");
+}
+
+static int mc_unittest4(void) {
+ int bs = 12, hs = 3;
+ DIFFS_NUMBERS base[12] = { 1111, -2222, 33, 100, 100, 15555, -1, 19999, 888, 755, -1, -730 };
+ DIFFS_NUMBERS high[3] = { 365, -123, 0 };
+
+ double prob = ks_2samp(base, bs, high, hs, 2);
+ return double_expect(prob, "0.777778", "12x3");
+}
+
+int mc_unittest(void) {
+ int errors = 0;
+
+ errors += mc_unittest1();
+ errors += mc_unittest2();
+ errors += mc_unittest3();
+ errors += mc_unittest4();
+
+ return errors;
+}
+
diff --git a/web/api/queries/weights.h b/web/api/queries/weights.h
new file mode 100644
index 0000000..50d8634
--- /dev/null
+++ b/web/api/queries/weights.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_WEIGHTS_H
+#define NETDATA_API_WEIGHTS_H 1
+
+#include "query.h"
+
+typedef enum {
+ WEIGHTS_METHOD_MC_KS2 = 1,
+ WEIGHTS_METHOD_MC_VOLUME = 2,
+ WEIGHTS_METHOD_ANOMALY_RATE = 3,
+} WEIGHTS_METHOD;
+
+typedef enum {
+ WEIGHTS_FORMAT_CHARTS = 1,
+ WEIGHTS_FORMAT_CONTEXTS = 2,
+} WEIGHTS_FORMAT;
+
+extern int enable_metric_correlations;
+extern int metric_correlations_version;
+extern WEIGHTS_METHOD default_metric_correlations_method;
+
+int web_api_v1_weights (RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout);
+
+WEIGHTS_METHOD weights_string_to_method(const char *method);
+const char *weights_method_to_string(WEIGHTS_METHOD method);
+int mc_unittest(void);
+
+#endif //NETDATA_API_WEIGHTS_H
diff --git a/web/api/tests/valid_urls.c b/web/api/tests/valid_urls.c
new file mode 100644
index 0000000..8a2a87f
--- /dev/null
+++ b/web/api/tests/valid_urls.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "libnetdata/required_dummies.h"
+#include "database/rrd.h"
+#include "web/server/web_client.h"
+#include <setjmp.h>
+#include <cmocka.h>
+#include <stdbool.h>
+
+void free_temporary_host(RRDHOST *host)
+{
+ (void) host;
+}
+
+void *__wrap_free_temporary_host(RRDHOST *host)
+{
+ (void) host;
+ return NULL;
+}
+
+void repr(char *result, int result_size, char const *buf, int size)
+{
+ int n;
+ char *end = result + result_size - 1;
+ unsigned char const *ubuf = (unsigned char const *)buf;
+ while (size && result_size > 0) {
+ if (*ubuf <= 0x20 || *ubuf >= 0x80) {
+ n = snprintf(result, result_size, "\\%02X", *ubuf);
+ } else {
+ *result = *ubuf;
+ n = 1;
+ }
+ result += n;
+ result_size -= n;
+ ubuf++;
+ size--;
+ }
+ if (result_size > 0)
+ *(result++) = 0;
+ else
+ *end = 0;
+}
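+
+// e.g. repr(out, sizeof(out), "info\r\n", 6) produces "info\0D\0A": every
+// byte <= 0x20 or >= 0x80 is expanded to a backslash-escaped hex pair, so
+// control characters in request buffers stay visible in the test output.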
+
+// ---------------------------------- Mocking accesses from web_client ------------------------------------------------
+
+ssize_t send(int sockfd, const void *buf, size_t len, int flags)
+{
+ info("Mocking send: %zu bytes\n", len);
+ (void)sockfd;
+ (void)buf;
+ (void)flags;
+ return len;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_hostname(const char *hostname, uint32_t hash)
+{
+ (void)hostname;
+ (void)hash;
+ return NULL;
+}
+
+/* Note: we've got some intricate code inside the global statistics module; it might be useful to pull it into the
+   test set instead of mocking it. */
+void __wrap_finished_web_request_statistics(
+ uint64_t dt, uint64_t bytes_received, uint64_t bytes_sent, uint64_t content_size, uint64_t compressed_content_size)
+{
+ (void)dt;
+ (void)bytes_received;
+ (void)bytes_sent;
+ (void)content_size;
+ (void)compressed_content_size;
+}
+
+char *__wrap_config_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ (void)root;
+ (void)section;
+ (void)name;
+ (void)default_value;
+ return "UNKNOWN FIX ME";
+}
+
+int __wrap_web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url)
+{
+ char url_repr[160];
+ repr(url_repr, sizeof(url_repr), url, strlen(url));
+ printf("web_client_api_request_v1(url=\"%s\")\n", url_repr);
+ check_expected_ptr(host);
+ check_expected_ptr(w);
+ check_expected_ptr(url_repr);
+ return HTTP_RESP_OK;
+}
+
+int __wrap_mysendfile(struct web_client *w, char *filename)
+{
+ (void)w;
+ printf("mysendfile(filename=\"%s\"\n", filename);
+ check_expected_ptr(filename);
+ return HTTP_RESP_OK;
+}
+
+int __wrap_rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url)
+{
+ (void)host;
+ (void)w;
+ (void)url;
+ return 0;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_guid(const char *guid, uint32_t hash)
+{
+ (void)guid;
+ (void)hash;
+ printf("FIXME: rrdset_find_guid\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find_byname(RRDHOST *host, const char *name)
+{
+ (void)host;
+ (void)name;
+ printf("FIXME: rrdset_find_byname\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find(RRDHOST *host, const char *id)
+{
+ (void)host;
+ (void)id;
+ printf("FIXME: rrdset_find\n");
+ return NULL;
+}
+
+// -------------------------------- Mocking the log - dump straight through --------------------------------------------
+
+void __wrap_debug_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf(" DEBUG: ");
+    vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+}
+
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf(" INFO: ");
+    vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+}
+
+void __wrap_error_int(
+ const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)prefix;
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf(" ERROR: ");
+    vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+}
+
+void __wrap_fatal_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf("FATAL: ");
+    vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+ fail();
+}
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+char *netdata_configured_web_dir = "UNKNOWN FIXME";
+RRDHOST *localhost = NULL;
+
+struct config netdata_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+/* Note: this is not a CMocka group_test_setup/teardown pair. This is performed per-test.
+*/
+static struct web_client *setup_fresh_web_client()
+{
+ struct web_client *w = (struct web_client *)malloc(sizeof(struct web_client));
+ memset(w, 0, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ strcpy(w->origin, "*"); // Simulate web_client_create_on_fd()
+ w->cookie1[0] = 0; // Simulate web_client_create_on_fd()
+ w->cookie2[0] = 0; // Simulate web_client_create_on_fd()
+ w->acl = 0x1f; // Everything on
+ return w;
+}
+
+static void destroy_web_client(struct web_client *w)
+{
+ buffer_free(w->response.data);
+ buffer_free(w->response.header);
+ buffer_free(w->response.header_output);
+ free(w);
+}
+
+//////////////////////////// Test cases ///////////////////////////////////////////////////////////////////////////////
+
+static void only_root(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET / HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_string(__wrap_mysendfile, filename, "/");
+
+ web_client_process_request(w);
+
+ //assert_string_equal(w->decoded_query_string, def->query_out);
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void two_slashes(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET // HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_string(__wrap_mysendfile, filename, "//");
+
+ web_client_process_request(w);
+
+ //assert_string_equal(w->decoded_query_string, def->query_out);
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void absolute_url(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET http://localhost:19999/api/v1/info HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void valid_url(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/info?blah HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+/* RFC2616, section 4.1:
+
+ In the interest of robustness, servers SHOULD ignore any empty
+ line(s) received where a Request-Line is expected. In other words, if
+ the server is reading the protocol stream at the beginning of a
+ message and receives a CRLF first, it should ignore the CRLF.
+*/
+static void leading_blanks(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "\r\n\r\nGET /api/v1/info?blah HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+static void empty_url(void **state)
+{
+ (void)state;
+
+ if (localhost != NULL)
+ free(localhost);
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET HTTP/1.1\r\n\r\n");
+
+ char debug[4096];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("-> \"%s\"\n", debug);
+
+ //char expected_url_repr[4096];
+ //repr(expected_url_repr, sizeof(expected_url_repr), def->url_out_repr, strlen(def->url_out_repr));
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "?blah");
+ destroy_web_client(w);
+ free(localhost);
+ localhost = NULL;
+}
+
+/* If the %-escape is being performed at the correct time then the url should not be treated as a query, but instead
+ as a path "/api/v1/info?blah?" which should dispatch into the API with the given values.
+*/
+static void not_a_query(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/info%3fblah%3f HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "info?blah?", 10);
+
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, w);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, expected_url_repr);
+
+ web_client_process_request(w);
+
+ assert_string_equal(w->decoded_query_string, "");
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void cr_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/inf\ro\t?blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+static void newline_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/inf\no\t?blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void bad_version(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /api/v1/info?blah HTTP/1.2\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void pathless_query(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET ?blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void pathless_fragment(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET #blah HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void short_percent(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET % HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void short_percent2(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET %0 HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void short_percent3(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET %");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void percent_nulls(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET %00%00%00%00%00%00 HTTP/1.1\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void percent_invalid(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET /%x%x%x%x%x%x HTTP/1.1\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void space_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET / / HTTP/1.1\r\n\r\n");
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void random_sploit1(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ // FIXME: Encoding probably needs to go through printf
+ buffer_need_bytes(w->response.data, 55);
+ memcpy(
+ w->response.data->buffer,
+ "GET \x03\x00\x00/*\xE0\x00\x00\x00\x00\x00Cookie: mstshash=Administr HTTP/1.1\r\n\r\n", 54);
+ w->response.data->len = 54;
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+static void null_in_url(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET / / HTTP/1.1\r\n\r\n");
+ w->response.data->buffer[5] = 0;
+
+ char debug[160];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+static void many_ands(void **state)
+{
+ (void)state;
+ localhost = malloc(sizeof(RRDHOST));
+
+ struct web_client *w = setup_fresh_web_client();
+ buffer_strcat(w->response.data, "GET foo?");
+ for (size_t i = 0; i < 600; i++)
+ buffer_strcat(w->response.data, "&");
+ buffer_strcat(w->response.data, " HTTP/1.1\r\n\r\n");
+
+ char debug[2048];
+ repr(debug, sizeof(debug), w->response.data->buffer, w->response.data->len);
+ printf("->%s\n", debug);
+
+ char expected_url_repr[160];
+ repr(expected_url_repr, sizeof(expected_url_repr), "inf\no\t", 6);
+
+ web_client_process_request(w);
+
+ assert_int_equal(w->response.code, HTTP_RESP_BAD_REQUEST);
+
+ destroy_web_client(w);
+ free(localhost);
+}
+
+int main(void)
+{
+ debug_flags = 0xffffffffffff;
+ int fails = 0;
+
+ struct CMUnitTest static_tests[] = {
+ cmocka_unit_test(only_root), cmocka_unit_test(two_slashes), cmocka_unit_test(valid_url),
+ cmocka_unit_test(leading_blanks), cmocka_unit_test(empty_url), cmocka_unit_test(newline_in_url),
+ cmocka_unit_test(not_a_query), cmocka_unit_test(cr_in_url), cmocka_unit_test(pathless_query),
+ cmocka_unit_test(pathless_fragment), cmocka_unit_test(short_percent), cmocka_unit_test(short_percent2),
+ cmocka_unit_test(short_percent3), cmocka_unit_test(percent_nulls), cmocka_unit_test(percent_invalid),
+ cmocka_unit_test(space_in_url), cmocka_unit_test(random_sploit1), cmocka_unit_test(null_in_url),
+ cmocka_unit_test(absolute_url),
+ // cmocka_unit_test(many_ands), CMocka cannot recover after this crash
+ cmocka_unit_test(bad_version)
+ };
+ (void)many_ands;
+
+ fails += cmocka_run_group_tests_name("static_tests", static_tests, NULL, NULL);
+ return fails;
+}
diff --git a/web/api/tests/web_api.c b/web/api/tests/web_api.c
new file mode 100644
index 0000000..93e6454
--- /dev/null
+++ b/web/api/tests/web_api.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "libnetdata/required_dummies.h"
+#include "database/rrd.h"
+#include "web/server/web_client.h"
+#include <setjmp.h>
+#include <cmocka.h>
+#include <stdbool.h>
+
+void free_temporary_host(RRDHOST *host)
+{
+ (void) host;
+}
+
+void *__wrap_free_temporary_host(RRDHOST *host)
+{
+ (void) host;
+ return NULL;
+}
+
+void repr(char *result, int result_size, char const *buf, int size)
+{
+ int n;
+ char *end = result + result_size - 1;
+ unsigned char const *ubuf = (unsigned char const *)buf;
+ while (size && result_size > 0) {
+ if (*ubuf <= 0x20 || *ubuf >= 0x80) {
+ n = snprintf(result, result_size, "\\%02X", *ubuf);
+ } else {
+ *result = *ubuf;
+ n = 1;
+ }
+ result += n;
+ result_size -= n;
+ ubuf++;
+ size--;
+ }
+ if (result_size > 0)
+ *(result++) = 0;
+ else
+ *end = 0;
+}
+
+// ---------------------------------- Mocking accesses from web_client ------------------------------------------------
+
+ssize_t send(int sockfd, const void *buf, size_t len, int flags)
+{
+ info("Mocking send: %zu bytes\n", len);
+ (void)sockfd;
+ (void)buf;
+ (void)flags;
+ return len;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_hostname(const char *hostname, uint32_t hash)
+{
+ (void)hostname;
+ (void)hash;
+ return NULL;
+}
+
+/* Note: we've got some intricate code inside the global statistics module; it might be useful to pull it into the
+   test set instead of mocking it. */
+void __wrap_finished_web_request_statistics(
+ uint64_t dt, uint64_t bytes_received, uint64_t bytes_sent, uint64_t content_size, uint64_t compressed_content_size)
+{
+ (void)dt;
+ (void)bytes_received;
+ (void)bytes_sent;
+ (void)content_size;
+ (void)compressed_content_size;
+}
+
+char *__wrap_config_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ (void)root;
+ (void)section;
+ (void)name;
+ (void)default_value;
+ return "UNKNOWN FIX ME";
+}
+
+int __wrap_web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url)
+{
+ char url_repr[160];
+ repr(url_repr, sizeof(url_repr), url, strlen(url));
+ info("web_client_api_request_v1(url=\"%s\")\n", url_repr);
+ check_expected_ptr(host);
+ check_expected_ptr(w);
+ check_expected_ptr(url_repr);
+ return HTTP_RESP_OK;
+}
+
+int __wrap_rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url)
+{
+ (void)host;
+ (void)w;
+ (void)url;
+ return 0;
+}
+
+RRDHOST *__wrap_rrdhost_find_by_guid(const char *guid, uint32_t hash)
+{
+ (void)guid;
+ (void)hash;
+ printf("FIXME: rrdset_find_guid\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find_byname(RRDHOST *host, const char *name)
+{
+ (void)host;
+ (void)name;
+ printf("FIXME: rrdset_find_byname\n");
+ return NULL;
+}
+
+RRDSET *__wrap_rrdset_find(RRDHOST *host, const char *id)
+{
+ (void)host;
+ (void)id;
+ printf("FIXME: rrdset_find\n");
+ return NULL;
+}
+
+// -------------------------------- Mocking the log - capture per-test ------------------------------------------------
+
+char log_buffer[10240] = { 0 };
+void __wrap_debug_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ size_t cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, " DEBUG: ");
+ cur = strlen(log_buffer);
+ vsnprintf(log_buffer + cur, sizeof(log_buffer) - cur, fmt, args);
+ cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, "\n");
+ va_end(args);
+}
+
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ size_t cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, " INFO: ");
+ cur = strlen(log_buffer);
+ vsnprintf(log_buffer + cur, sizeof(log_buffer) - cur, fmt, args);
+ cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, "\n");
+ va_end(args);
+}
+
+void __wrap_error_int(
+ const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)prefix;
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ size_t cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, " ERROR: ");
+ cur = strlen(log_buffer);
+ vsnprintf(log_buffer + cur, sizeof(log_buffer) - cur, fmt, args);
+ cur = strlen(log_buffer);
+ snprintf(log_buffer + cur, sizeof(log_buffer) - cur, "\n");
+ va_end(args);
+}
+
+void __wrap_fatal_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+ va_list args;
+ va_start(args, fmt);
+ printf("FATAL: ");
+ vprintf(fmt, args);
+ printf("\n");
+ va_end(args);
+ fail();
+}
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+char *netdata_configured_web_dir = "UNKNOWN FIXME";
+RRDHOST *localhost = NULL;
+
+struct config netdata_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+const char *http_headers[] = { "Host: 254.254.0.1",
+ "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_" // No ,
+ "0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36",
+ "Connection: keep-alive",
+ "X-Forwarded-For: 1.254.1.251",
+ "Cookie: _ga=GA1.1.1227576758.1571113676; _gid=GA1.2.1222321739.1573628979",
+ "X-Requested-With: XMLHttpRequest",
+ "Accept-Encoding: gzip, deflate",
+ "Cache-Control: no-cache, no-store" };
+#define MAX_HEADERS (sizeof(http_headers) / (sizeof(const char *)))
+
+static void build_request(struct web_buffer *wb, const char *url, bool use_cr, size_t num_headers)
+{
+ buffer_reset(wb);
+ buffer_strcat(wb, "GET ");
+ buffer_strcat(wb, url);
+ buffer_strcat(wb, " HTTP/1.1");
+ if (use_cr)
+ buffer_strcat(wb, "\r");
+ buffer_strcat(wb, "\n");
+ for (size_t i = 0; i < num_headers && i < MAX_HEADERS; i++) {
+ buffer_strcat(wb, http_headers[i]);
+ if (use_cr)
+ buffer_strcat(wb, "\r");
+ buffer_strcat(wb, "\n");
+ }
+ if (use_cr)
+ buffer_strcat(wb, "\r");
+ buffer_strcat(wb, "\n");
+}
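+
+// For instance, build_request(wb, "/api/v1/info", true, 1) yields:
+//   "GET /api/v1/info HTTP/1.1\r\nHost: 254.254.0.1\r\n\r\n"
+// and with use_cr = false the same request is emitted with bare "\n" line
+// endings, so every test family covers both line-termination styles.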
+
+/* Note: this is not a CMocka group_test_setup/teardown pair. This is performed per-test.
+*/
+static struct web_client *setup_fresh_web_client()
+{
+ struct web_client *w = (struct web_client *)malloc(sizeof(struct web_client));
+ memset(w, 0, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.data->date = 0; // Valgrind uninitialised value
+ w->response.data->expires = 0; // Valgrind uninitialised value
+ w->response.data->options = 0; // Valgrind uninitialised value
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ strcpy(w->origin, "*"); // Simulate web_client_create_on_fd()
+ w->cookie1[0] = 0; // Simulate web_client_create_on_fd()
+ w->cookie2[0] = 0; // Simulate web_client_create_on_fd()
+ w->acl = 0x1f; // Everything on
+ return w;
+}
+
+static void destroy_web_client(struct web_client *w)
+{
+ buffer_free(w->response.data);
+ buffer_free(w->response.header);
+ buffer_free(w->response.header_output);
+ free(w);
+}
+
+// ---------------------------------- Parameterized test-families -----------------------------------------------------
+// There is no way to pass a parameter block into the setup fixture; we would have to patch CMocka and maintain it
+// locally. (The void **current_state in _run_group_tests would be set from a parameter.) This is unfortunate, as a
+// parametric unit-tester needs to be able to pass parameters to the fixtures. We fake this by calculating the
+// space of tests in the launcher, passing an array of identical unit-tests to CMocka and then counting through the
+// parameters in the shared state passed between tests. To initialise this counter structure we use this global to
+// pass from the launcher (test-builder) to the setup-fixture.
+
+void *shared_test_state = NULL;
+
+// -------------------------------- Test family for /api/v1/info ------------------------------------------------------
+
+struct test_def {
+ size_t num_headers; // Index coordinate
+ size_t prefix_len; // Index coordinate
+ char name[80];
+ size_t full_len;
+ struct web_client *instance; // Used within this single test
+ bool completed, use_cr;
+ struct test_def *next, *prev;
+};
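+
+// The launcher below builds this list once, points shared_test_state at its
+// head, and each api_info() invocation consumes one node and advances the
+// pointer - effectively threading one parameter block per generated CMocka
+// test through the single shared global.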
+
+static void api_info(void **state)
+{
+ (void)state;
+ struct test_def *def = (struct test_def *)shared_test_state;
+ shared_test_state = def->next;
+
+ if (def->prev != NULL && !def->prev->completed && strlen(log_buffer) > 0) {
+ printf("Log of failing case %s:\n", def->prev->name);
+ puts(log_buffer);
+ }
+ log_buffer[0] = 0;
+ if (localhost != NULL)
+ free(localhost);
+ localhost = calloc(1,sizeof(RRDHOST));
+
+ def->instance = setup_fresh_web_client();
+ build_request(def->instance->response.data, "/api/v1/info", def->use_cr, def->num_headers);
+ def->instance->response.data->len = def->prefix_len;
+
+ char buffer_repr[1024];
+ repr(buffer_repr, sizeof(buffer_repr), def->instance->response.data->buffer,def->prefix_len);
+ info("Buffer contains: %s [first %zu]", buffer_repr,def->prefix_len);
+ if (def->prefix_len == def->full_len) {
+ expect_value(__wrap_web_client_api_request_v1, host, localhost);
+ expect_value(__wrap_web_client_api_request_v1, w, def->instance);
+ expect_string(__wrap_web_client_api_request_v1, url_repr, "info");
+ }
+
+ web_client_process_request(def->instance);
+
+ if (def->prefix_len == def->full_len)
+ assert_int_equal(def->instance->flags & WEB_CLIENT_FLAG_WAIT_RECEIVE, 0);
+ else
+ assert_int_equal(def->instance->flags & WEB_CLIENT_FLAG_WAIT_RECEIVE, WEB_CLIENT_FLAG_WAIT_RECEIVE);
+ assert_int_equal(def->instance->mode, WEB_CLIENT_MODE_NORMAL);
+ def->completed = true;
+ log_buffer[0] = 0;
+}
+
+static int api_info_launcher()
+{
+ size_t num_tests = 0;
+ struct web_client *template = setup_fresh_web_client();
+ struct test_def *current, *head = NULL;
+ struct test_def *prev = NULL;
+
+ for (size_t i = 0; i < MAX_HEADERS; i++) {
+ build_request(template->response.data, "/api/v1/info", true, i);
+ for (size_t j = 0; j <= template->response.data->len; j++) {
+ if (j == 0 && i > 0)
+ continue; // All zero-length prefixes are identical, skip after first time
+ current = malloc(sizeof(struct test_def));
+ if (prev != NULL)
+ prev->next = current;
+ else
+ head = current;
+ current->prev = prev;
+ prev = current;
+
+ current->num_headers = i;
+ current->prefix_len = j;
+ current->full_len = template->response.data->len;
+ current->instance = NULL;
+ current->next = NULL;
+ current->use_cr = true;
+ current->completed = false;
+ sprintf(
+ current->name, "/api/v1/info@%zu,%zu/%zu+%d", current->num_headers, current->prefix_len,
+ current->full_len,true);
+ num_tests++;
+ }
+ }
+ for (size_t i = 0; i < MAX_HEADERS; i++) {
+ build_request(template->response.data, "/api/v1/info", false, i);
+ for (size_t j = 0; j <= template->response.data->len; j++) {
+ if (j == 0 && i > 0)
+ continue; // All zero-length prefixes are identical, skip after first time
+ current = malloc(sizeof(struct test_def));
+ if (prev != NULL)
+ prev->next = current;
+ else
+ head = current;
+ current->prev = prev;
+ prev = current;
+
+ current->num_headers = i;
+ current->prefix_len = j;
+ current->full_len = template->response.data->len;
+ current->instance = NULL;
+ current->next = NULL;
+ current->use_cr = false;
+ current->completed = false;
+ sprintf(
+ current->name, "/api/v1/info@%zu,%zu/%zu+%d", current->num_headers, current->prefix_len,
+ current->full_len,false);
+ num_tests++;
+ }
+ }
+
+ struct CMUnitTest *tests = calloc(num_tests, sizeof(struct CMUnitTest));
+ current = head;
+ for (size_t i = 0; i < num_tests; i++) {
+ tests[i].name = current->name;
+ tests[i].test_func = api_info;
+ tests[i].setup_func = NULL;
+ tests[i].teardown_func = NULL;
+ tests[i].initial_state = NULL;
+ current = current->next;
+ }
+
+ printf("Setup %zu tests in %p\n", num_tests, head);
+ shared_test_state = head;
+ int fails = _cmocka_run_group_tests("web_api", tests, num_tests, NULL, NULL);
+ free(tests);
+ destroy_web_client(template);
+ current = head;
+ while (current != NULL) {
+ struct test_def *c = current;
+ current = current->next;
+ if (c->instance != NULL) // Clean up resources from tests that failed
+ destroy_web_client(c->instance);
+ free(c);
+ }
+ if (localhost!=NULL)
+ free(localhost);
+ return fails;
+}
+
+/* Raw notes for the cases that we did not use in the unit testing suite.
+   Leaving them here instead of deleting them in case we expand the suite during the
+ work on the URL parser.
+
+ Any ' ' in the URI -> invalid response (Description in 5.1 of RFC2616)
+ Characters that can't be in paths #;?
+ "GET /apb/../api/v1/info" HTTP/1.1\r\n"
+
+ https://github.com/uriparser/uriparser/blob/uriparser-0.9.3/test/FourSuite.cpp
+ Not clear why some of these are illegal -> reserved chars?
+
+ ASSERT_TRUE(testBadUri("beepbeep\x07\x07", 8));
+ ASSERT_TRUE(testBadUri("\n", 0));
+ ASSERT_TRUE(testBadUri("::", 0)); // not OK, per Roy Fielding on the W3C uri list on 2004-04-01
+
+ // the following test cases are from a Perl script by David A. Wheeler
+ // at http://www.dwheeler.com/secure-programs/url.pl
+ ASSERT_TRUE(testBadUri("http://www yahoo.com", 10));
+ ASSERT_TRUE(testBadUri("http://www.yahoo.com/hello world/", 26));
+ ASSERT_TRUE(testBadUri("http://www.yahoo.com/yelp.html#\"", 31));
+
+ // the following test cases are from a Haskell program by Graham Klyne
+ // at http://www.ninebynine.org/Software/HaskellUtils/Network/URITest.hs
+ ASSERT_TRUE(testBadUri("[2010:836B:4179::836B:4179]", 0));
+ ASSERT_TRUE(testBadUri(" ", 0));
+ ASSERT_TRUE(testBadUri("%", 1));
+ ASSERT_TRUE(testBadUri("A%Z", 2));
+ ASSERT_TRUE(testBadUri("%ZZ", 1));
+ ASSERT_TRUE(testBadUri("%AZ", 2));
+ ASSERT_TRUE(testBadUri("A C", 1));
+ ASSERT_TRUE(testBadUri("A\\'C", 1)); // r"A\'C"
+ ASSERT_TRUE(testBadUri("A`C", 1));
+ ASSERT_TRUE(testBadUri("A<C", 1));
+ ASSERT_TRUE(testBadUri("A>C", 1));
+ ASSERT_TRUE(testBadUri("A^C", 1));
+ ASSERT_TRUE(testBadUri("A\\\\C", 1)); // r'A\\C'
+ ASSERT_TRUE(testBadUri("A{C", 1));
+ ASSERT_TRUE(testBadUri("A|C", 1));
+ ASSERT_TRUE(testBadUri("A}C", 1));
+ ASSERT_TRUE(testBadUri("A[C", 1));
+ ASSERT_TRUE(testBadUri("A]C", 1));
+ ASSERT_TRUE(testBadUri("A[**]C", 1));
+ ASSERT_TRUE(testBadUri("http://[xyz]/", 8));
+ ASSERT_TRUE(testBadUri("http://]/", 7));
+ ASSERT_TRUE(testBadUri("http://example.org/[2010:836B:4179::836B:4179]", 19));
+ ASSERT_TRUE(testBadUri("http://example.org/abc#[2010:836B:4179::836B:4179]", 23));
+ ASSERT_TRUE(testBadUri("http://example.org/xxx/[qwerty]#a[b]", 23));
+
+ // from a post to the W3C uri list on 2004-02-17
+ // breaks at 22 instead of 17 because everything up to that point is a valid userinfo
+ ASSERT_TRUE(testBadUri("http://w3c.org:80path1/path2", 22));
+
+*/
+
+int main(void)
+{
+ debug_flags = 0xffffffffffff;
+ int fails = 0;
+ fails += api_info_launcher();
+
+ return fails;
+}
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
new file mode 100644
index 0000000..93f501f
--- /dev/null
+++ b/web/api/web_api_v1.c
@@ -0,0 +1,1707 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_api_v1.h"
+
+char *api_secret;
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ RRDR_OPTIONS value;
+} api_v1_data_options[] = {
+ { "nonzero" , 0 , RRDR_OPTION_NONZERO}
+ , {"flip" , 0 , RRDR_OPTION_REVERSED}
+ , {"reversed" , 0 , RRDR_OPTION_REVERSED}
+ , {"reverse" , 0 , RRDR_OPTION_REVERSED}
+ , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP}
+ , {"min2max" , 0 , RRDR_OPTION_MIN2MAX}
+ , {"ms" , 0 , RRDR_OPTION_MILLISECONDS}
+ , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS}
+ , {"abs" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS}
+ , {"display-absolute" , 0 , RRDR_OPTION_DISPLAY_ABS}
+ , {"seconds" , 0 , RRDR_OPTION_SECONDS}
+ , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO}
+ , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS}
+ , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON}
+ , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON}
+ , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE}
+ , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED}
+ , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS}
+ , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS}
+ , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT}
+ , {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER}
+ , {"raw" , 0 , RRDR_OPTION_RETURN_RAW}
+ , {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR}
+ , {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS}
+ , {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS}
+ , {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS}
+ , {NULL , 0 , 0}
+};
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ DATASOURCE_FORMAT value;
+} api_v1_data_formats[] = {
+ { DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON}
+ , {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP}
+ , {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON}
+ , {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP}
+ , {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV}
+ , {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV}
+ , {DATASOURCE_FORMAT_TSV , 0 , DATASOURCE_TSV}
+ , {"tsv-excel" , 0 , DATASOURCE_TSV}
+ , {DATASOURCE_FORMAT_HTML , 0 , DATASOURCE_HTML}
+ , {DATASOURCE_FORMAT_JS_ARRAY , 0 , DATASOURCE_JS_ARRAY}
+ , {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA}
+ , {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY}
+ , {DATASOURCE_FORMAT_CSV_MARKDOWN , 0 , DATASOURCE_CSV_MARKDOWN}
+ , { NULL, 0, 0}
+};
+
+static struct {
+ const char *name;
+ uint32_t hash;
+ DATASOURCE_FORMAT value;
+} api_v1_data_google_formats[] = {
+ // this is not an error - when Google requests JSON, it expects JavaScript
+ // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat
+ { "json" , 0 , DATASOURCE_DATATABLE_JSONP}
+ , {"html" , 0 , DATASOURCE_HTML}
+ , {"csv" , 0 , DATASOURCE_CSV}
+ , {"tsv-excel", 0 , DATASOURCE_TSV}
+ , { NULL, 0, 0}
+};
+
+void web_client_api_v1_init(void) {
+ int i;
+
+ for(i = 0; api_v1_data_options[i].name ; i++)
+ api_v1_data_options[i].hash = simple_hash(api_v1_data_options[i].name);
+
+ for(i = 0; api_v1_data_formats[i].name ; i++)
+ api_v1_data_formats[i].hash = simple_hash(api_v1_data_formats[i].name);
+
+ for(i = 0; api_v1_data_google_formats[i].name ; i++)
+ api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name);
+
+ web_client_api_v1_init_grouping();
+
+ uuid_t uuid;
+
+ // generate
+ uuid_generate(uuid);
+
+ // unparse (to string)
+ char uuid_str[37];
+ uuid_unparse_lower(uuid, uuid_str);
+}
+
+char *get_mgmt_api_key(void) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir);
+ char *api_key_filename=config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename);
+ static char guid[GUID_LEN + 1] = "";
+
+ if(likely(guid[0]))
+ return guid;
+
+ // read it from disk
+ int fd = open(api_key_filename, O_RDONLY);
+ if(fd != -1) {
+ char buf[GUID_LEN + 1];
+ if(read(fd, buf, GUID_LEN) != GUID_LEN)
+ error("Failed to read management API key from '%s'", api_key_filename);
+ else {
+ buf[GUID_LEN] = '\0';
+ if(regenerate_guid(buf, guid) == -1) {
+ error("Failed to validate management API key '%s' from '%s'.",
+ buf, api_key_filename);
+
+ guid[0] = '\0';
+ }
+ }
+ close(fd);
+ }
+
+ // generate a new one?
+ if(!guid[0]) {
+ uuid_t uuid;
+
+ uuid_generate_time(uuid);
+ uuid_unparse_lower(uuid, guid);
+ guid[GUID_LEN] = '\0';
+
+ // save it
+ fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC, 0444); // octal mode: read-only key file
+ if(fd == -1) {
+ error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename);
+ goto temp_key;
+ }
+
+ if(write(fd, guid, GUID_LEN) != GUID_LEN) {
+ error("Cannot write the unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename);
+ close(fd);
+ goto temp_key;
+ }
+
+ close(fd);
+ }
+
+ return guid;
+
+temp_key:
+ info("You can still continue to use the alarm management API using the authorization token %s during this Netdata session only.", guid);
+ return guid;
+}
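+
+// Illustrative usage (an assumption, not part of this file): the key
+// returned above is expected as an authorization token by the health
+// management API, e.g.
+//
+//   curl "http://localhost:19999/api/v1/manage/health?cmd=RESET" \
+//        -H "X-Auth-Token: <guid>"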
+
+void web_client_api_v1_management_init(void) {
+ api_secret = get_mgmt_api_key();
+}
+
+inline RRDR_OPTIONS web_client_api_request_v1_data_options(char *o) {
+ RRDR_OPTIONS ret = 0x00000000;
+ char *tok;
+
+ while(o && *o && (tok = mystrsep(&o, ", |"))) {
+ if(!*tok) continue;
+
+ uint32_t hash = simple_hash(tok);
+ int i;
+ for(i = 0; api_v1_data_options[i].name ; i++) {
+ if (unlikely(hash == api_v1_data_options[i].hash && !strcmp(tok, api_v1_data_options[i].name))) {
+ ret |= api_v1_data_options[i].value;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
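+
+// Illustrative example (not from the source): given the separators ", |"
+// accepted by mystrsep() above, a request such as
+//
+//   /api/v1/data?chart=system.cpu&options=nonzero|jsonwrap,ms
+//
+// yields RRDR_OPTION_NONZERO | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_MILLISECONDS.
+// Unknown tokens are silently ignored.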
+
+void web_client_api_request_v1_data_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) {
+ RRDR_OPTIONS used = 0; // to prevent adding duplicates
+ int added = 0;
+ for(int i = 0; api_v1_data_options[i].name ; i++) {
+ if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
+ const char *name = api_v1_data_options[i].name;
+ used |= api_v1_data_options[i].value;
+
+ if(added) buffer_strcat(wb, ",");
+ buffer_strcat(wb, name);
+
+ added++;
+ }
+ }
+}
+
+void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) {
+ char *write = buf;
+ char *end = &buf[size - 1];
+
+ RRDR_OPTIONS used = 0; // to prevent adding duplicates
+ int added = 0;
+ for(int i = 0; api_v1_data_options[i].name ; i++) {
+ if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
+ const char *name = api_v1_data_options[i].name;
+ used |= api_v1_data_options[i].value;
+
+ if(added && write < end)
+ *write++ = ',';
+
+ while(*name && write < end)
+ *write++ = *name++;
+
+ added++;
+ }
+ }
+ *write = *end = '\0';
+}
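+
+// Illustrative round-trip (not from the source): converting
+// RRDR_OPTION_NONZERO | RRDR_OPTION_REVERSED back to a string produces
+// "nonzero,flip", because "flip" is the first table entry mapping to
+// RRDR_OPTION_REVERSED and duplicates are suppressed via 'used'.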
+
+inline DATASOURCE_FORMAT web_client_api_request_v1_data_format(char *name) {
+ uint32_t hash = simple_hash(name);
+ int i;
+
+ for(i = 0; api_v1_data_formats[i].name ; i++) {
+ if (unlikely(hash == api_v1_data_formats[i].hash && !strcmp(name, api_v1_data_formats[i].name))) {
+ return api_v1_data_formats[i].value;
+ }
+ }
+
+ return DATASOURCE_JSON;
+}
+
+inline uint32_t web_client_api_request_v1_data_google_format(char *name) {
+ uint32_t hash = simple_hash(name);
+ int i;
+
+ for(i = 0; api_v1_data_google_formats[i].name ; i++) {
+ if (unlikely(hash == api_v1_data_google_formats[i].hash && !strcmp(name, api_v1_data_google_formats[i].name))) {
+ return api_v1_data_google_formats[i].value;
+ }
+ }
+
+ return DATASOURCE_JSON;
+}
+
+int web_client_api_request_v1_alarms_select (char *url) {
+ int all = 0;
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ if(!strcmp(value, "all")) all = 1;
+ else if(!strcmp(value, "active")) all = 0;
+ }
+
+ return all;
+}
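+
+// Example (illustrative): "/api/v1/alarms?all" selects every alarm, while
+// "/api/v1/alarms" or "/api/v1/alarms?active" returns only raised ones.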
+
+inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url) {
+ int all = web_client_api_request_v1_alarms_select(url);
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_alarms2json(host, w->response.data, all);
+ buffer_no_cacheable(w->response.data);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url) {
+ int all = web_client_api_request_v1_alarms_select(url);
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_alarms_values2json(host, w->response.data, all);
+ buffer_no_cacheable(w->response.data);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) {
+ RRDCALC_STATUS status = RRDCALC_STATUS_RAISED;
+ BUFFER *contexts = NULL;
+
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "[");
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value);
+
+ char* p = value;
+ if(!strcmp(name, "status")) {
+ while ((*p = (char)toupper((unsigned char)*p))) p++;
+ if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL;
+ else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING;
+ else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED;
+ else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED;
+ else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED;
+ else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR;
+ }
+ else if(!strcmp(name, "context") || !strcmp(name, "ctx")) {
+ if(!contexts) contexts = buffer_create(255);
+ buffer_strcat(contexts, "|");
+ buffer_strcat(contexts, value);
+ }
+ }
+
+ health_aggregate_alarms(host, w->response.data, contexts, status);
+
+ buffer_sprintf(w->response.data, "]\n");
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(w->response.data);
+
+ buffer_free(contexts);
+ return HTTP_RESP_OK;
+}
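+
+// Example request/response (illustrative):
+//
+//   GET /api/v1/alarm_count?status=CRITICAL&context=system.cpu
+//
+// responds with a single-element JSON array, e.g. [0], holding the number
+// of alarms in the requested status for the matched contexts.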
+
+inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) {
+ uint32_t after = 0;
+ char *chart = NULL;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ if (!strcmp(name, "after")) after = (uint32_t)strtoul(value, NULL, 0);
+ else if (!strcmp(name, "chart")) chart = value;
+ }
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ health_alarm_log2json(host, w->response.data, after, chart);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf)) {
+ int ret = HTTP_RESP_BAD_REQUEST;
+ char *chart = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "chart")) chart = value;
+ //else {
+ /// buffer_sprintf(w->response.data, "Unknown parameter '%s' in request.", name);
+ // goto cleanup;
+ //}
+ }
+
+ if(!chart || !*chart) {
+ buffer_sprintf(w->response.data, "No chart id is given at the request.");
+ goto cleanup;
+ }
+
+ RRDSET *st = rrdset_find(host, chart);
+ if(!st) st = rrdset_find_byname(host, chart);
+ if(!st) {
+ buffer_strcat(w->response.data, "Chart is not found: ");
+ buffer_strcat_htmlescape(w->response.data, chart);
+ ret = HTTP_RESP_NOT_FOUND;
+ goto cleanup;
+ }
+
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ st->last_accessed_time = now_realtime_sec();
+ callback(st, w->response.data);
+ return HTTP_RESP_OK;
+
+ cleanup:
+ return ret;
+}
+
+inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json);
+}
+
+static RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) {
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ char *tok;
+
+ while(o && *o && (tok = mystrsep(&o, ", |"))) {
+ if(!*tok) continue;
+
+ if(!strcmp(tok, "full") || !strcmp(tok, "all"))
+ options |= RRDCONTEXT_OPTIONS_ALL;
+ else if(!strcmp(tok, "charts") || !strcmp(tok, "instances"))
+ options |= RRDCONTEXT_OPTION_SHOW_INSTANCES;
+ else if(!strcmp(tok, "dimensions") || !strcmp(tok, "metrics"))
+ options |= RRDCONTEXT_OPTION_SHOW_METRICS;
+ else if(!strcmp(tok, "queue"))
+ options |= RRDCONTEXT_OPTION_SHOW_QUEUED;
+ else if(!strcmp(tok, "flags"))
+ options |= RRDCONTEXT_OPTION_SHOW_FLAGS;
+ else if(!strcmp(tok, "uuids"))
+ options |= RRDCONTEXT_OPTION_SHOW_UUIDS;
+ else if(!strcmp(tok, "deleted"))
+ options |= RRDCONTEXT_OPTION_SHOW_DELETED;
+ else if(!strcmp(tok, "labels"))
+ options |= RRDCONTEXT_OPTION_SHOW_LABELS;
+ else if(!strcmp(tok, "deepscan"))
+ options |= RRDCONTEXT_OPTION_DEEPSCAN;
+ else if(!strcmp(tok, "hidden"))
+ options |= RRDCONTEXT_OPTION_SHOW_HIDDEN;
+ }
+
+ return options;
+}
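+
+// Example (illustrative): "options=instances,metrics" resolves to
+// RRDCONTEXT_OPTION_SHOW_INSTANCES | RRDCONTEXT_OPTION_SHOW_METRICS, while
+// "options=full" enables the whole RRDCONTEXT_OPTIONS_ALL set.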
+
+static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w, char *url) {
+ char *context = NULL;
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ time_t after = 0, before = 0;
+ const char *chart_label_key = NULL, *chart_labels_filter = NULL;
+ BUFFER *dimensions = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "context") || !strcmp(name, "ctx")) context = value;
+ else if(!strcmp(name, "after")) after = str2l(value);
+ else if(!strcmp(name, "before")) before = str2l(value);
+ else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value);
+ else if(!strcmp(name, "chart_label_key")) chart_label_key = value;
+ else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions) dimensions = buffer_create(100);
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ }
+
+ if(!context || !*context) {
+ buffer_sprintf(w->response.data, "No context is given at the request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ SIMPLE_PATTERN *chart_label_key_pattern = NULL;
+ SIMPLE_PATTERN *chart_labels_filter_pattern = NULL;
+ SIMPLE_PATTERN *chart_dimensions_pattern = NULL;
+
+ if(chart_label_key)
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(chart_labels_filter)
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(dimensions) {
+ chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ buffer_free(dimensions);
+ }
+
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern);
+
+ simple_pattern_free(chart_label_key_pattern);
+ simple_pattern_free(chart_labels_filter_pattern);
+ simple_pattern_free(chart_dimensions_pattern);
+
+ return ret;
+}
+
+static int web_client_api_request_v1_contexts(RRDHOST *host, struct web_client *w, char *url) {
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ time_t after = 0, before = 0;
+ const char *chart_label_key = NULL, *chart_labels_filter = NULL;
+ BUFFER *dimensions = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "after")) after = str2l(value);
+ else if(!strcmp(name, "before")) before = str2l(value);
+ else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value);
+ else if(!strcmp(name, "chart_label_key")) chart_label_key = value;
+ else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions) dimensions = buffer_create(100);
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ }
+
+ SIMPLE_PATTERN *chart_label_key_pattern = NULL;
+ SIMPLE_PATTERN *chart_labels_filter_pattern = NULL;
+ SIMPLE_PATTERN *chart_dimensions_pattern = NULL;
+
+ if(chart_label_key)
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(chart_labels_filter)
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(dimensions) {
+ chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ buffer_free(dimensions);
+ }
+
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern);
+
+ simple_pattern_free(chart_label_key_pattern);
+ simple_pattern_free(chart_labels_filter_pattern);
+ simple_pattern_free(chart_dimensions_pattern);
+
+ return ret;
+}
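+
+// Example request (illustrative):
+//
+//   GET /api/v1/contexts?after=-60&options=full&dimensions=user|system
+//
+// lists the contexts of the host active in the last minute, restricted to
+// the given dimension patterns.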
+
+inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) {
+ (void)url;
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ charts2json(host, w->response.data, 0, 0);
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_archivedcharts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ (void)url;
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+#ifdef ENABLE_DBENGINE
+ if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ sql_rrdset2json(host, w->response.data);
+#endif
+ return HTTP_RESP_OK;
+}
+
+inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart);
+}
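+
+// Example (illustrative): GET /api/v1/chart?chart=system.cpu returns the
+// static metadata of a single chart as JSON.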
+
+void fix_google_param(char *s) {
+ if(unlikely(!s)) return;
+
+ for( ; *s ;s++) {
+ if(!isalnum(*s) && *s != '.' && *s != '_' && *s != '-')
+ *s = '_';
+ }
+}
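+
+// Example (illustrative): fix_google_param() maps "alert('x')" to
+// "alert__x__" - every character outside [A-Za-z0-9._-] becomes '_',
+// so reflected tqx values cannot inject script into the JSONP reply.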
+
+
+// returns the HTTP code
+inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) {
+ debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url);
+
+ int ret = HTTP_RESP_BAD_REQUEST;
+ BUFFER *dimensions = NULL;
+
+ buffer_flush(w->response.data);
+
+ char *google_version = "0.6",
+ *google_reqId = "0",
+ *google_sig = "0",
+ *google_out = "json",
+ *responseHandler = NULL,
+ *outFileName = NULL;
+
+ time_t last_timestamp_in_data = 0, google_timestamp = 0;
+
+ char *chart = NULL;
+ char *before_str = NULL;
+ char *after_str = NULL;
+ char *group_time_str = NULL;
+ char *points_str = NULL;
+ char *timeout_str = NULL;
+ char *context = NULL;
+ char *chart_label_key = NULL;
+ char *chart_labels_filter = NULL;
+ char *group_options = NULL;
+ size_t tier = 0;
+ RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
+ DATASOURCE_FORMAT format = DATASOURCE_JSON;
+ RRDR_OPTIONS options = 0;
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 data query param '%s' with value '%s'", w->id, name, value);
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "context")) context = value;
+ else if(!strcmp(name, "chart_label_key")) chart_label_key = value;
+ else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value;
+ else if(!strcmp(name, "chart")) chart = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions) dimensions = buffer_create(100);
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ else if(!strcmp(name, "show_dimensions")) options |= RRDR_OPTION_ALL_DIMENSIONS;
+ else if(!strcmp(name, "after")) after_str = value;
+ else if(!strcmp(name, "before")) before_str = value;
+ else if(!strcmp(name, "points")) points_str = value;
+ else if(!strcmp(name, "timeout")) timeout_str = value;
+ else if(!strcmp(name, "gtime")) group_time_str = value;
+ else if(!strcmp(name, "group_options")) group_options = value;
+ else if(!strcmp(name, "group")) {
+ group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+ }
+ else if(!strcmp(name, "format")) {
+ format = web_client_api_request_v1_data_format(value);
+ }
+ else if(!strcmp(name, "options")) {
+ options |= web_client_api_request_v1_data_options(value);
+ }
+ else if(!strcmp(name, "callback")) {
+ responseHandler = value;
+ }
+ else if(!strcmp(name, "filename")) {
+ outFileName = value;
+ }
+ else if(!strcmp(name, "tqx")) {
+ // parse Google Visualization API options
+ // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source
+ char *tqx_name, *tqx_value;
+
+ while(value) {
+ tqx_value = mystrsep(&value, ";");
+ if(!tqx_value || !*tqx_value) continue;
+
+ tqx_name = mystrsep(&tqx_value, ":");
+ if(!tqx_name || !*tqx_name) continue;
+ if(!tqx_value || !*tqx_value) continue;
+
+ if(!strcmp(tqx_name, "version"))
+ google_version = tqx_value;
+ else if(!strcmp(tqx_name, "reqId"))
+ google_reqId = tqx_value;
+ else if(!strcmp(tqx_name, "sig")) {
+ google_sig = tqx_value;
+ google_timestamp = strtoul(google_sig, NULL, 0);
+ }
+ else if(!strcmp(tqx_name, "out")) {
+ google_out = tqx_value;
+ format = web_client_api_request_v1_data_google_format(google_out);
+ }
+ else if(!strcmp(tqx_name, "responseHandler"))
+ responseHandler = tqx_value;
+ else if(!strcmp(tqx_name, "outFileName"))
+ outFileName = tqx_value;
+ }
+ }
+ else if(!strcmp(name, "tier")) {
+ tier = str2ul(value);
+ if(tier < storage_tiers)
+ options |= RRDR_OPTION_SELECTED_TIER;
+ else
+ tier = 0;
+ }
+ }
+
+ // validate the google parameters given
+ fix_google_param(google_out);
+ fix_google_param(google_sig);
+ fix_google_param(google_reqId);
+ fix_google_param(google_version);
+ fix_google_param(responseHandler);
+ fix_google_param(outFileName);
+
+ RRDSET *st = NULL;
+ ONEWAYALLOC *owa = onewayalloc_create(0);
+ QUERY_TARGET *qt = NULL;
+
+ if(!is_valid_sp(chart) && !is_valid_sp(context)) {
+ buffer_sprintf(w->response.data, "No chart or context is given.");
+ goto cleanup;
+ }
+
+ if(chart && !context) {
+ // check if this is a specific chart
+ st = rrdset_find(host, chart);
+ if (!st) st = rrdset_find_byname(host, chart);
+ }
+
+ long long before = (before_str && *before_str)?str2l(before_str):0;
+ long long after = (after_str && *after_str) ?str2l(after_str):-600;
+ int points = (points_str && *points_str)?str2i(points_str):0;
+ int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0;
+ long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0;
+
+ QUERY_TARGET_REQUEST qtr = {
+ .after = after,
+ .before = before,
+ .host = host,
+ .st = st,
+ .hosts = NULL,
+ .contexts = context,
+ .charts = chart,
+ .dimensions = (dimensions)?buffer_tostring(dimensions):NULL,
+ .timeout = timeout,
+ .points = points,
+ .format = format,
+ .options = options,
+ .group_method = group,
+ .group_options = group_options,
+ .resampling_time = group_time,
+ .tier = tier,
+ .chart_label_key = chart_label_key,
+ .charts_labels_filter = chart_labels_filter,
+ .query_source = QUERY_SOURCE_API_DATA,
+ };
+ qt = query_target_create(&qtr);
+
+ if(!qt || !qt->query.used) {
+ buffer_sprintf(w->response.data, "No metrics where matched to query.");
+ ret = HTTP_RESP_NOT_FOUND;
+ goto cleanup;
+ }
+
+ if (timeout) {
+ struct timeval now;
+ now_realtime_timeval(&now);
+ int inqueue = (int)dt_usec(&w->tv_in, &now) / 1000;
+ timeout -= inqueue;
+ if (timeout <= 0) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Query timeout exceeded");
+ ret = HTTP_RESP_BACKEND_FETCH_FAILED;
+ goto cleanup;
+ }
+ }
+
+ debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%u', format '%u', options '0x%08x'"
+ , w->id, chart, (dimensions)?buffer_tostring(dimensions):"", after, before , points, group, format, options);
+
+ if(outFileName && *outFileName) {
+ buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
+ debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName);
+ }
+
+ if(format == DATASOURCE_DATATABLE_JSONP) {
+ if(responseHandler == NULL)
+ responseHandler = "google.visualization.Query.setResponse";
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'",
+ w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName
+ );
+
+ buffer_sprintf(
+ w->response.data,
+ "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:",
+ responseHandler,
+ google_version,
+ google_reqId,
+ (int64_t)(st ? st->last_updated.tv_sec : 0)); // st may be NULL on context-based queries
+ }
+ else if(format == DATASOURCE_JSONP) {
+ if(responseHandler == NULL)
+ responseHandler = "callback";
+
+ buffer_strcat(w->response.data, responseHandler);
+ buffer_strcat(w->response.data, "(");
+ }
+
+ ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data);
+
+ if(format == DATASOURCE_DATATABLE_JSONP) {
+ if(google_timestamp < last_timestamp_in_data)
+ buffer_strcat(w->response.data, "});");
+
+ else {
+ // the client already has the latest data
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data,
+ "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});",
+ responseHandler, google_version, google_reqId);
+ }
+ }
+ else if(format == DATASOURCE_JSONP)
+ buffer_strcat(w->response.data, ");");
+
+cleanup:
+ if(qt && qt->used) {
+ internal_error(true, "QUERY_TARGET: left non-released on query '%s'", qt->id);
+ query_target_release(qt);
+ }
+ onewayalloc_destroy(owa);
+ buffer_free(dimensions);
+ return ret;
+}
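+
+// Example request (illustrative; parameter names follow the parser above):
+//
+//   GET /api/v1/data?chart=system.cpu&after=-60&points=10&group=average
+//         &format=json&options=nonzero|jsonwrap
+//
+// returns the last minute of system.cpu resampled to 10 points, wrapped
+// in the jsonwrap envelope, with all-zero dimensions removed.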
+
+// Pings a netdata server:
+// /api/v1/registry?action=hello
+//
+// Access to a netdata registry:
+// /api/v1/registry?action=access&machine=${machine_guid}&name=${hostname}&url=${url}
+//
+// Delete from a netdata registry:
+// /api/v1/registry?action=delete&machine=${machine_guid}&name=${hostname}&url=${url}&delete_url=${delete_url}
+//
+// Search for the URLs of a machine:
+// /api/v1/registry?action=search&machine=${machine_guid}&name=${hostname}&url=${url}&for=${machine_guid}
+//
+// Impersonate:
+// /api/v1/registry?action=switch&machine=${machine_guid}&name=${hostname}&url=${url}&to=${new_person_guid}
+inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t hash_action = 0, hash_access = 0, hash_hello = 0, hash_delete = 0, hash_search = 0,
+ hash_switch = 0, hash_machine = 0, hash_url = 0, hash_name = 0, hash_delete_url = 0, hash_for = 0,
+ hash_to = 0 /*, hash_redirects = 0 */;
+
+ if(unlikely(!hash_action)) {
+ hash_action = simple_hash("action");
+ hash_access = simple_hash("access");
+ hash_hello = simple_hash("hello");
+ hash_delete = simple_hash("delete");
+ hash_search = simple_hash("search");
+ hash_switch = simple_hash("switch");
+ hash_machine = simple_hash("machine");
+ hash_url = simple_hash("url");
+ hash_name = simple_hash("name");
+ hash_delete_url = simple_hash("delete_url");
+ hash_for = simple_hash("for");
+ hash_to = simple_hash("to");
+/*
+ hash_redirects = simple_hash("redirects");
+*/
+ }
+
+ char person_guid[GUID_LEN + 1] = "";
+
+ debug(D_WEB_CLIENT, "%llu: API v1 registry with URL '%s'", w->id, url);
+
+ // TODO
+ // The browser may send multiple cookies with our id
+
+ char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "=");
+ if(cookie)
+ strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], 36);
+
+ char action = '\0';
+ char *machine_guid = NULL,
+ *machine_url = NULL,
+ *url_name = NULL,
+ *search_machine_guid = NULL,
+ *delete_url = NULL,
+ *to_person_guid = NULL;
+/*
+ int redirects = 0;
+*/
+
+ // Don't cache registry responses
+ buffer_no_cacheable(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if (!name || !*name) continue;
+ if (!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 registry query param '%s' with value '%s'", w->id, name, value);
+
+ uint32_t hash = simple_hash(name);
+
+ if(hash == hash_action && !strcmp(name, "action")) {
+ uint32_t vhash = simple_hash(value);
+
+ if(vhash == hash_access && !strcmp(value, "access")) action = 'A';
+ else if(vhash == hash_hello && !strcmp(value, "hello")) action = 'H';
+ else if(vhash == hash_delete && !strcmp(value, "delete")) action = 'D';
+ else if(vhash == hash_search && !strcmp(value, "search")) action = 'S';
+ else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W';
+#ifdef NETDATA_INTERNAL_CHECKS
+ else error("unknown registry action '%s'", value);
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+/*
+ else if(hash == hash_redirects && !strcmp(name, "redirects"))
+ redirects = atoi(value);
+*/
+ else if(hash == hash_machine && !strcmp(name, "machine"))
+ machine_guid = value;
+
+ else if(hash == hash_url && !strcmp(name, "url"))
+ machine_url = value;
+
+ else if(action == 'A') {
+ if(hash == hash_name && !strcmp(name, "name"))
+ url_name = value;
+ }
+ else if(action == 'D') {
+ if(hash == hash_delete_url && !strcmp(name, "delete_url"))
+ delete_url = value;
+ }
+ else if(action == 'S') {
+ if(hash == hash_for && !strcmp(name, "for"))
+ search_machine_guid = value;
+ }
+ else if(action == 'W') {
+ if(hash == hash_to && !strcmp(name, "to"))
+ to_person_guid = value;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ else error("unused registry URL parameter '%s' with value '%s'", name, value);
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ if(unlikely(respect_web_browser_do_not_track_policy && web_client_has_donottrack(w))) {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). The registry requires persistent cookies on your browser to work.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ if(unlikely(action == 'H')) {
+ // HELLO request, dashboard ACL
+ analytics_log_dashboard();
+ if(unlikely(!web_client_can_access_dashboard(w)))
+ return web_client_permission_denied(w);
+ }
+ else {
+ // everything else, registry ACL
+ if(unlikely(!web_client_can_access_registry(w)))
+ return web_client_permission_denied(w);
+ }
+
+ switch(action) {
+ case 'A':
+ if(unlikely(!machine_guid || !machine_url || !url_name)) {
+ error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Access request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec());
+
+ case 'D':
+ if(unlikely(!machine_guid || !machine_url || !delete_url)) {
+ error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Delete request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec());
+
+ case 'S':
+ if(unlikely(!machine_guid || !machine_url || !search_machine_guid)) {
+ error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Search request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_search_json(host, w, person_guid, machine_guid, machine_url, search_machine_guid, now_realtime_sec());
+
+ case 'W':
+ if(unlikely(!machine_guid || !machine_url || !to_person_guid)) {
+ error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET");
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry Switch request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ web_client_enable_tracking_required(w);
+ return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec());
+
+ case 'H':
+ return registry_request_hello_json(host, w);
+
+ default:
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Invalid registry request - you need to set an action: hello, access, delete, search");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
+
+static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb) {
+ int alarm_normal = 0, alarm_warn = 0, alarm_crit = 0;
+ RRDCALC *rc;
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+
+ switch(rc->status) {
+ case RRDCALC_STATUS_WARNING:
+ alarm_warn++;
+ break;
+ case RRDCALC_STATUS_CRITICAL:
+ alarm_crit++;
+ break;
+ default:
+ alarm_normal++;
+ }
+ }
+ foreach_rrdcalc_in_rrdhost_done(rc);
+ buffer_sprintf(wb, "\t\t\"normal\": %d,\n", alarm_normal);
+ buffer_sprintf(wb, "\t\t\"warning\": %d,\n", alarm_warn);
+ buffer_sprintf(wb, "\t\t\"critical\": %d\n", alarm_crit);
+}
+
+static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
+ RRDHOST *host;
+ int count = 0;
+
+ buffer_strcat(wb, "\t\"mirrored_hosts\": [\n");
+ rrd_rdlock();
+ rrdhost_foreach_read(host) {
+ if (count > 0)
+ buffer_strcat(wb, ",\n");
+
+ buffer_sprintf(wb, "\t\t\"%s\"", rrdhost_hostname(host));
+ count++;
+ }
+
+ buffer_strcat(wb, "\n\t],\n\t\"mirrored_hosts_status\": [\n");
+ count = 0;
+ rrdhost_foreach_read(host)
+ {
+ if (count > 0)
+ buffer_strcat(wb, ",\n");
+
+ netdata_mutex_lock(&host->receiver_lock);
+ buffer_sprintf(
+ wb, "\t\t{ \"guid\": \"%s\", \"hostname\": \"%s\", \"reachable\": %s, \"hops\": %d"
+ , host->machine_guid
+ , rrdhost_hostname(host)
+ , (host->receiver || host == localhost) ? "true" : "false"
+ , host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1
+ );
+ netdata_mutex_unlock(&host->receiver_lock);
+
+ rrdhost_aclk_state_lock(host);
+ if (host->aclk_state.claimed_id)
+ buffer_sprintf(wb, ", \"claim_id\": \"%s\"", host->aclk_state.claimed_id);
+ else
+ buffer_strcat(wb, ", \"claim_id\": null");
+ rrdhost_aclk_state_unlock(host);
+
+ if (host->node_id) {
+ char node_id_str[GUID_LEN + 1];
+ uuid_unparse_lower(*host->node_id, node_id_str);
+ buffer_sprintf(wb, ", \"node_id\": \"%s\" }", node_id_str);
+ } else
+ buffer_strcat(wb, ", \"node_id\": null }");
+
+ count++;
+ }
+ rrd_unlock();
+
+ buffer_strcat(wb, "\n\t],\n");
+}
+
+inline void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation) {
+ char tabs[11];
+
+ if (indentation > 10)
+ indentation = 10;
+
+ tabs[0] = '\0';
+ while (indentation) {
+ strcat(tabs, "\t");
+ indentation--;
+ }
+
+ rrdlabels_to_buffer(host->rrdlabels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
+ buffer_strcat(wb, "\n");
+}
+
+extern int aclk_connected;
+inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
+{
+ buffer_strcat(wb, "{\n");
+ buffer_sprintf(wb, "\t\"version\": \"%s\",\n", rrdhost_program_version(host));
+ buffer_sprintf(wb, "\t\"uid\": \"%s\",\n", host->machine_guid);
+
+ web_client_api_request_v1_info_mirrored_hosts(wb);
+
+ buffer_strcat(wb, "\t\"alarms\": {\n");
+ web_client_api_request_v1_info_summary_alarm_statuses(host, wb);
+ buffer_strcat(wb, "\t},\n");
+
+ buffer_sprintf(wb, "\t\"os_name\": \"%s\",\n", (host->system_info->host_os_name) ? host->system_info->host_os_name : "");
+ buffer_sprintf(wb, "\t\"os_id\": \"%s\",\n", (host->system_info->host_os_id) ? host->system_info->host_os_id : "");
+ buffer_sprintf(wb, "\t\"os_id_like\": \"%s\",\n", (host->system_info->host_os_id_like) ? host->system_info->host_os_id_like : "");
+ buffer_sprintf(wb, "\t\"os_version\": \"%s\",\n", (host->system_info->host_os_version) ? host->system_info->host_os_version : "");
+ buffer_sprintf(wb, "\t\"os_version_id\": \"%s\",\n", (host->system_info->host_os_version_id) ? host->system_info->host_os_version_id : "");
+ buffer_sprintf(wb, "\t\"os_detection\": \"%s\",\n", (host->system_info->host_os_detection) ? host->system_info->host_os_detection : "");
+ buffer_sprintf(wb, "\t\"cores_total\": \"%s\",\n", (host->system_info->host_cores) ? host->system_info->host_cores : "");
+ buffer_sprintf(wb, "\t\"total_disk_space\": \"%s\",\n", (host->system_info->host_disk_space) ? host->system_info->host_disk_space : "");
+ buffer_sprintf(wb, "\t\"cpu_freq\": \"%s\",\n", (host->system_info->host_cpu_freq) ? host->system_info->host_cpu_freq : "");
+ buffer_sprintf(wb, "\t\"ram_total\": \"%s\",\n", (host->system_info->host_ram_total) ? host->system_info->host_ram_total : "");
+
+ if (host->system_info->container_os_name)
+ buffer_sprintf(wb, "\t\"container_os_name\": \"%s\",\n", host->system_info->container_os_name);
+ if (host->system_info->container_os_id)
+ buffer_sprintf(wb, "\t\"container_os_id\": \"%s\",\n", host->system_info->container_os_id);
+ if (host->system_info->container_os_id_like)
+ buffer_sprintf(wb, "\t\"container_os_id_like\": \"%s\",\n", host->system_info->container_os_id_like);
+ if (host->system_info->container_os_version)
+ buffer_sprintf(wb, "\t\"container_os_version\": \"%s\",\n", host->system_info->container_os_version);
+ if (host->system_info->container_os_version_id)
+ buffer_sprintf(wb, "\t\"container_os_version_id\": \"%s\",\n", host->system_info->container_os_version_id);
+ if (host->system_info->container_os_detection)
+ buffer_sprintf(wb, "\t\"container_os_detection\": \"%s\",\n", host->system_info->container_os_detection);
+ if (host->system_info->is_k8s_node)
+ buffer_sprintf(wb, "\t\"is_k8s_node\": \"%s\",\n", host->system_info->is_k8s_node);
+
+ buffer_sprintf(wb, "\t\"kernel_name\": \"%s\",\n", (host->system_info->kernel_name) ? host->system_info->kernel_name : "");
+ buffer_sprintf(wb, "\t\"kernel_version\": \"%s\",\n", (host->system_info->kernel_version) ? host->system_info->kernel_version : "");
+ buffer_sprintf(wb, "\t\"architecture\": \"%s\",\n", (host->system_info->architecture) ? host->system_info->architecture : "");
+ buffer_sprintf(wb, "\t\"virtualization\": \"%s\",\n", (host->system_info->virtualization) ? host->system_info->virtualization : "");
+ buffer_sprintf(wb, "\t\"virt_detection\": \"%s\",\n", (host->system_info->virt_detection) ? host->system_info->virt_detection : "");
+ buffer_sprintf(wb, "\t\"container\": \"%s\",\n", (host->system_info->container) ? host->system_info->container : "");
+ buffer_sprintf(wb, "\t\"container_detection\": \"%s\",\n", (host->system_info->container_detection) ? host->system_info->container_detection : "");
+
+ if (host->system_info->cloud_provider_type)
+ buffer_sprintf(wb, "\t\"cloud_provider_type\": \"%s\",\n", host->system_info->cloud_provider_type);
+ if (host->system_info->cloud_instance_type)
+ buffer_sprintf(wb, "\t\"cloud_instance_type\": \"%s\",\n", host->system_info->cloud_instance_type);
+ if (host->system_info->cloud_instance_region)
+ buffer_sprintf(wb, "\t\"cloud_instance_region\": \"%s\",\n", host->system_info->cloud_instance_region);
+
+ buffer_strcat(wb, "\t\"host_labels\": {\n");
+ host_labels2json(host, wb, 2);
+ buffer_strcat(wb, "\t},\n");
+
+ buffer_strcat(wb, "\t\"functions\": {\n");
+ host_functions2json(host, wb, 2, "\"", "\"");
+ buffer_strcat(wb, "\t},\n");
+
+ buffer_strcat(wb, "\t\"collectors\": [");
+ chartcollectors2json(host, wb);
+ buffer_strcat(wb, "\n\t],\n");
+
+#ifdef DISABLE_CLOUD
+ buffer_strcat(wb, "\t\"cloud-enabled\": false,\n");
+#else
+ buffer_sprintf(wb, "\t\"cloud-enabled\": %s,\n",
+ appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", 1) ? "true" : "false");
+#endif
+
+#ifdef ENABLE_ACLK
+ buffer_strcat(wb, "\t\"cloud-available\": true,\n");
+#else
+ buffer_strcat(wb, "\t\"cloud-available\": false,\n");
+#endif
+ char *agent_id = get_agent_claimid();
+ if (agent_id == NULL)
+ buffer_strcat(wb, "\t\"agent-claimed\": false,\n");
+ else {
+ buffer_strcat(wb, "\t\"agent-claimed\": true,\n");
+ freez(agent_id);
+ }
+#ifdef ENABLE_ACLK
+ if (aclk_connected) {
+ buffer_strcat(wb, "\t\"aclk-available\": true,\n");
+ }
+ else
+#endif
+ buffer_strcat(wb, "\t\"aclk-available\": false,\n"); // Intentionally valid with/without #ifdef above
+
+ buffer_strcat(wb, "\t\"memory-mode\": ");
+ analytics_get_data(analytics_data.netdata_config_memory_mode, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"multidb-disk-quota\": ");
+ analytics_get_data(analytics_data.netdata_config_multidb_disk_quota, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"page-cache-size\": ");
+ analytics_get_data(analytics_data.netdata_config_page_cache_size, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"stream-enabled\": ");
+ analytics_get_data(analytics_data.netdata_config_stream_enabled, wb);
+ buffer_strcat(wb, ",\n");
+
+#ifdef ENABLE_COMPRESSION
+ if(host->sender){
+ buffer_strcat(wb, "\t\"stream-compression\": ");
+ buffer_strcat(wb, stream_has_capability(host->sender, STREAM_CAP_COMPRESSION) ? "true" : "false");
+ buffer_strcat(wb, ",\n");
+ }else{
+ buffer_strcat(wb, "\t\"stream-compression\": null,\n");
+ }
+#else
+ buffer_strcat(wb, "\t\"stream-compression\": null,\n");
+#endif //ENABLE_COMPRESSION
+
+ buffer_strcat(wb, "\t\"hosts-available\": ");
+ analytics_get_data(analytics_data.netdata_config_hosts_available, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"https-enabled\": ");
+ analytics_get_data(analytics_data.netdata_config_https_enabled, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"buildinfo\": ");
+ analytics_get_data(analytics_data.netdata_buildinfo, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"release-channel\": ");
+ analytics_get_data(analytics_data.netdata_config_release_channel, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"web-enabled\": ");
+ analytics_get_data(analytics_data.netdata_config_web_enabled, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"notification-methods\": ");
+ analytics_get_data(analytics_data.netdata_notification_methods, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"exporting-enabled\": ");
+ analytics_get_data(analytics_data.netdata_config_exporting_enabled, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"exporting-connectors\": ");
+ analytics_get_data(analytics_data.netdata_exporting_connectors, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"allmetrics-prometheus-used\": ");
+ analytics_get_data(analytics_data.netdata_allmetrics_prometheus_used, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"allmetrics-shell-used\": ");
+ analytics_get_data(analytics_data.netdata_allmetrics_shell_used, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"allmetrics-json-used\": ");
+ analytics_get_data(analytics_data.netdata_allmetrics_json_used, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"dashboard-used\": ");
+ analytics_get_data(analytics_data.netdata_dashboard_used, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"charts-count\": ");
+ analytics_get_data(analytics_data.netdata_charts_count, wb);
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\"metrics-count\": ");
+ analytics_get_data(analytics_data.netdata_metrics_count, wb);
+
+#if defined(ENABLE_ML)
+ buffer_strcat(wb, ",\n");
+ char *ml_info = ml_get_host_info(host);
+
+ buffer_strcat(wb, "\t\"ml-info\": ");
+ buffer_strcat(wb, ml_info);
+
+ freez(ml_info);
+#endif
+
+ buffer_strcat(wb, "\n}");
+ return 0;
+}
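+
+// Illustrative shape of the generated document (truncated):
+//
+//   {
+//     "version": "...",
+//     "uid": "...",
+//     "mirrored_hosts": [ ... ],
+//     "alarms": { "normal": 0, "warning": 0, "critical": 0 },
+//     ...
+//     "metrics-count": ...
+//   }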
+
+#if defined(ENABLE_ML)
+int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) {
+ (void) url;
+
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ char *s = ml_get_host_runtime_info(host);
+ if (!s)
+ s = strdupz("{\"error\": \"json string is empty\" }\n");
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_strcat(wb, s);
+ buffer_no_cacheable(wb);
+
+ freez(s);
+ return HTTP_RESP_OK;
+}
+
+int web_client_api_request_v1_ml_models(RRDHOST *host, struct web_client *w, char *url) {
+ (void) url;
+
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ char *s = ml_get_host_models(host);
+ if (!s)
+ s = strdupz("{\"error\": \"json string is empty\" }\n");
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_strcat(wb, s);
+ buffer_no_cacheable(wb);
+
+ freez(s);
+ return HTTP_RESP_OK;
+}
+#endif
+
+inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) {
+ (void)url;
+ if (!netdata_ready) return HTTP_RESP_BACKEND_FETCH_FAILED;
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ web_client_api_request_v1_info_fill_buffer(host, wb);
+
+ buffer_no_cacheable(wb);
+ return HTTP_RESP_OK;
+}
+
+static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client *w, char *url) {
+ UNUSED(url);
+ UNUSED(host);
+ if (!netdata_ready) return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+
+ char *str = aclk_state_json();
+ buffer_strcat(wb, str);
+ freez(str);
+
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
+ return HTTP_RESP_OK;
+}
+
+static int web_client_api_request_v1_weights_internal(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ long long baseline_after = 0, baseline_before = 0, after = 0, before = 0, points = 0;
+ RRDR_OPTIONS options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NONZERO | RRDR_OPTION_NULL2ZERO;
+ int options_count = 0;
+ RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
+ int timeout = 0;
+ size_t tier = 0;
+ const char *group_options = NULL, *contexts_str = NULL;
+
+ while (url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value)
+ continue;
+
+ char *name = mystrsep(&value, "=");
+ if (!name || !*name)
+ continue;
+ if (!value || !*value)
+ continue;
+
+ if (!strcmp(name, "baseline_after"))
+ baseline_after = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "baseline_before"))
+ baseline_before = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "after") || !strcmp(name, "highlight_after"))
+ after = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "before") || !strcmp(name, "highlight_before"))
+ before = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "points") || !strcmp(name, "max_points"))
+ points = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "timeout"))
+ timeout = (int) strtoul(value, NULL, 0);
+
+ else if(!strcmp(name, "group"))
+ group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+
+ else if(!strcmp(name, "options")) {
+ if(!options_count) options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO;
+ options |= web_client_api_request_v1_data_options(value);
+ options_count++;
+ }
+
+ else if(!strcmp(name, "method"))
+ method = weights_string_to_method(value);
+
+ else if(!strcmp(name, "context") || !strcmp(name, "contexts"))
+ contexts_str = value;
+
+ else if(!strcmp(name, "tier")) {
+ tier = str2ul(value);
+ if(tier < storage_tiers)
+ options |= RRDR_OPTION_SELECTED_TIER;
+ else
+ tier = 0;
+ }
+ }
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+
+ SIMPLE_PATTERN *contexts = (contexts_str) ? simple_pattern_create(contexts_str, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT) : NULL;
+
+ int ret = web_api_v1_weights(host, wb, method, format, group, group_options, baseline_after, baseline_before, after, before, points, options, contexts, tier, timeout);
+
+ simple_pattern_free(contexts);
+ return ret;
+}
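+
+// Example request (illustrative; "method" is parsed by
+// weights_string_to_method(), so the value shown is an assumption):
+//
+//   GET /api/v1/weights?after=-300&points=60&contexts=system.*&method=ks2
+//
+// scores the matched contexts over the last 5 minutes.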
+
+int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_v1_weights_internal(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS);
+}
+
+int web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_v1_weights_internal(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS);
+}
+
+int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char *url) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ int timeout = 0;
+ const char *function = NULL;
+
+ while (url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value)
+ continue;
+
+ char *name = mystrsep(&value, "=");
+ if (!name || !*name)
+ continue;
+
+ if (!strcmp(name, "function"))
+ function = value;
+
+ else if (!strcmp(name, "timeout"))
+ timeout = (int) strtoul(value, NULL, 0);
+ }
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
+
+ return rrd_call_function_and_wait(host, wb, timeout, function);
+}
+
+int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
+
+ buffer_strcat(wb, "{\n");
+ host_functions2json(host, wb, 1, "\"", "\"");
+ buffer_strcat(wb, "}");
+
+ return HTTP_RESP_OK;
+}
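+
+// Example (illustrative): GET /api/v1/functions returns the registered
+// functions of the host as a JSON object, while
+// GET /api/v1/function?function=<name>&timeout=10 executes one of them and
+// waits up to 10 seconds for the collector to respond.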
+
+#ifndef ENABLE_DBENGINE
+int web_client_api_request_v1_dbengine_stats(RRDHOST *host, struct web_client *w, char *url) {
+ return HTTP_RESP_NOT_FOUND;
+}
+#else
+static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, size_t tier) {
+ RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]);
+
+ buffer_sprintf(wb,
+ "\n\t\t\"default_granularity_secs\":%zu"
+ ",\n\t\t\"sizeof_metric\":%zu"
+ ",\n\t\t\"sizeof_metric_in_index\":%zu"
+ ",\n\t\t\"sizeof_page\":%zu"
+ ",\n\t\t\"sizeof_page_in_index\":%zu"
+ ",\n\t\t\"sizeof_extent\":%zu"
+ ",\n\t\t\"sizeof_page_in_extent\":%zu"
+ ",\n\t\t\"sizeof_datafile\":%zu"
+ ",\n\t\t\"sizeof_page_in_cache\":%zu"
+ ",\n\t\t\"sizeof_point_data\":%zu"
+ ",\n\t\t\"sizeof_page_data\":%zu"
+ ",\n\t\t\"pages_per_extent\":%zu"
+ ",\n\t\t\"datafiles\":%zu"
+ ",\n\t\t\"extents\":%zu"
+ ",\n\t\t\"extents_pages\":%zu"
+ ",\n\t\t\"points\":%zu"
+ ",\n\t\t\"metrics\":%zu"
+ ",\n\t\t\"metrics_pages\":%zu"
+ ",\n\t\t\"extents_compressed_bytes\":%zu"
+ ",\n\t\t\"pages_uncompressed_bytes\":%zu"
+ ",\n\t\t\"pages_duration_secs\":%lld"
+ ",\n\t\t\"single_point_pages\":%zu"
+ ",\n\t\t\"first_t\":%llu"
+ ",\n\t\t\"last_t\":%llu"
+ ",\n\t\t\"database_retention_secs\":%lld"
+ ",\n\t\t\"average_compression_savings\":%0.2f"
+ ",\n\t\t\"average_point_duration_secs\":%0.2f"
+ ",\n\t\t\"average_metric_retention_secs\":%0.2f"
+ ",\n\t\t\"ephemeral_metrics_per_day_percent\":%0.2f"
+ ",\n\t\t\"average_page_size_bytes\":%0.2f"
+ ",\n\t\t\"estimated_concurrently_collected_metrics\":%zu"
+ ",\n\t\t\"currently_collected_metrics\":%zu"
+ ",\n\t\t\"max_concurrently_collected_metrics\":%zu"
+ ",\n\t\t\"disk_space\":%zu"
+ ",\n\t\t\"max_disk_space\":%zu"
+ , stats.default_granularity_secs
+ , stats.sizeof_metric
+ , stats.sizeof_metric_in_index
+ , stats.sizeof_page
+ , stats.sizeof_page_in_index
+ , stats.sizeof_extent
+ , stats.sizeof_page_in_extent
+ , stats.sizeof_datafile
+ , stats.sizeof_page_in_cache
+ , stats.sizeof_point_data
+ , stats.sizeof_page_data
+ , stats.pages_per_extent
+ , stats.datafiles
+ , stats.extents
+ , stats.extents_pages
+ , stats.points
+ , stats.metrics
+ , stats.metrics_pages
+ , stats.extents_compressed_bytes
+ , stats.pages_uncompressed_bytes
+ , (long long)stats.pages_duration_secs
+ , stats.single_point_pages
+ , stats.first_t
+ , stats.last_t
+ , (long long)stats.database_retention_secs
+ , stats.average_compression_savings
+ , stats.average_point_duration_secs
+ , stats.average_metric_retention_secs
+ , stats.ephemeral_metrics_per_day_percent
+ , stats.average_page_size_bytes
+ , stats.estimated_concurrently_collected_metrics
+ , stats.currently_collected_metrics
+ , stats.max_concurrently_collected_metrics
+ , stats.disk_space
+ , stats.max_disk_space
+ );
+}
+int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+
+ if(!dbengine_enabled) {
+ buffer_strcat(wb, "dbengine is not enabled");
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
+ buffer_strcat(wb, "{");
+ for(size_t tier = 0; tier < storage_tiers ;tier++) {
+ buffer_sprintf(wb, "%s\n\t\"tier%zu\": {", tier?",":"", tier);
+ web_client_api_v1_dbengine_stats_for_tier(wb, tier);
+ buffer_strcat(wb, "\n\t}");
+ }
+ buffer_strcat(wb, "\n}");
+
+ return HTTP_RESP_OK;
+}
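+
+// Illustrative response shape (one object per storage tier):
+//
+//   { "tier0": { "default_granularity_secs": 1, ... },
+//     "tier1": { ... } }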
+#endif
+
+#ifdef NETDATA_DEV_MODE
+#define ACL_DEV_OPEN_ACCESS WEB_CLIENT_ACL_DASHBOARD
+#else
+#define ACL_DEV_OPEN_ACCESS 0
+#endif
+
+static struct api_command {
+ const char *command;
+ uint32_t hash;
+ WEB_CLIENT_ACL acl;
+ int (*callback)(RRDHOST *host, struct web_client *w, char *url);
+} api_commands[] = {
+ { "info", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_info },
+ { "data", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_data },
+ { "chart", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_chart },
+ { "charts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_charts },
+ { "context", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_context },
+ { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_contexts },
+ { "archivedcharts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_archivedcharts },
+
+ // registry checks the ACL by itself, so we allow everything
+ { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry },
+
+ // badges can be fetched with both dashboard and badge permissions
+ { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_badge },
+
+ { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarms },
+ { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarms_values },
+ { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_log },
+ { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_variables },
+ { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_count },
+ { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_allmetrics },
+
+#if defined(ENABLE_ML)
+ { "ml_info", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_ml_info },
+ { "ml_models", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_ml_models },
+#endif
+
+ { "manage/health", 0, WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_mgmt_health },
+ { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_aclk_state },
+ { "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_metric_correlations },
+ { "weights", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_weights },
+
+ { "function", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_function },
+ { "functions", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_functions },
+
+ { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_dbengine_stats },
+
+ // terminator
+ { NULL, 0, WEB_CLIENT_ACL_NONE, NULL },
+};
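+
+// Extending the table (an illustrative sketch, not upstream code): a new
+// endpoint is a handler with the same signature, registered before the
+// terminator, e.g.
+//
+//   { "my_command", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_my_command },
+//
+// "my_command" and its handler are hypothetical names; the hash field is left
+// 0 and is filled in on the first call to web_client_api_request_v1() below.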
+
+inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url) {
+ static int initialized = 0;
+ int i;
+
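+    // lazily compute each command's hash once, on the first request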
+ if(unlikely(initialized == 0)) {
+ initialized = 1;
+
+ for(i = 0; api_commands[i].command ; i++)
+ api_commands[i].hash = simple_hash(api_commands[i].command);
+ }
+
+ // get the command
+ if(url) {
+ debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, url);
+ uint32_t hash = simple_hash(url);
+
+ for(i = 0; api_commands[i].command ;i++) {
+ if(unlikely(hash == api_commands[i].hash && !strcmp(url, api_commands[i].command))) {
+ if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl))
+ return web_client_permission_denied(w);
+
+                return api_commands[i].callback(host, w, (w->decoded_query_string + 1));
+ }
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Unsupported v1 API command: ");
+ buffer_strcat_htmlescape(w->response.data, url);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ else {
+ buffer_flush(w->response.data);
+        buffer_strcat(w->response.data, "Which API v1 command?");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
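+
+// Worked example (assumptions, not upstream code): for a request to
+//   /api/v1/data?chart=system.cpu&after=-60
+// the web server hands this function url = "data", while
+// w->decoded_query_string holds "?chart=system.cpu&after=-60", so the
+// matching callback receives "chart=system.cpu&after=-60" (the +1 above
+// skips the leading '?').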
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
new file mode 100644
index 0000000..e6682c9
--- /dev/null
+++ b/web/api/web_api_v1.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_API_V1_H
+#define NETDATA_WEB_API_V1_H 1
+
+#include "daemon/common.h"
+#include "web/api/badges/web_buffer_svg.h"
+#include "web/api/formatters/rrd2json.h"
+#include "web/api/health/health_cmdapi.h"
+#include "web/api/queries/weights.h"
+
+#define MAX_CHART_LABELS_FILTER (32)
+RRDR_OPTIONS web_client_api_request_v1_data_options(char *o);
+void web_client_api_request_v1_data_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options);
+void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options);
+
+uint32_t web_client_api_request_v1_data_format(char *name);
+uint32_t web_client_api_request_v1_data_google_format(char *name);
+
+int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
+int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_archivedcharts(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb);
+void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation);
+
+void web_client_api_v1_init(void);
+void web_client_api_v1_management_init(void);
+
+extern char *api_secret;
+
+#endif //NETDATA_WEB_API_V1_H