Diffstat (limited to 'web/api/formatters')
26 files changed, 2432 insertions, 0 deletions
diff --git a/web/api/formatters/Makefile.am b/web/api/formatters/Makefile.am new file mode 100644 index 0000000..11f239c --- /dev/null +++ b/web/api/formatters/Makefile.am @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + csv \ + json \ + ssv \ + value \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md new file mode 100644 index 0000000..3e67ac6 --- /dev/null +++ b/web/api/formatters/README.md @@ -0,0 +1,78 @@ +<!-- +title: "Query formatting" +custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/README.md +--> + +# Query formatting + +API data queries need to be formatted before being returned to the caller. +Using API parameters, callers can choose the format they want back. + +The following formats are supported: + +| format|module|content type|description| +|:----:|:----:|:----------:|:----------| +| `array`|[ssv](/web/api/formatters/ssv/README.md)|application/json|a JSON array| +| `csv`|[csv](/web/api/formatters/csv/README.md)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines| +| `csvjsonarray`|[csv](/web/api/formatters/csv/README.md)|application/json|a JSON array, with each row as another array (the first row has the dimension names)| +| `datasource`|[json](/web/api/formatters/json/README.md)|application/json|a Google Visualization Provider `datasource` javascript callback| +| `datatable`|[json](/web/api/formatters/json/README.md)|application/json|a Google `datatable`| +| `html`|[csv](/web/api/formatters/csv/README.md)|text/html|an html table| +| `json`|[json](/web/api/formatters/json/README.md)|application/json|a JSON object| +| `jsonp`|[json](/web/api/formatters/json/README.md)|application/json|a JSONP javascript callback| +| `markdown`|[csv](/web/api/formatters/csv/README.md)|text/plain|a markdown table| +| `ssv`|[ssv](/web/api/formatters/ssv/README.md)|text/plain|a space separated list of values| +| `ssvcomma`|[ssv](/web/api/formatters/ssv/README.md)|text/plain|a comma separated list of values| +| `tsv`|[csv](/web/api/formatters/csv/README.md)|text/plain|a TAB delimited `csv` (MS Excel flavor)| + +For examples of each format, check the respective module documentation. + +## Metadata with the `jsonwrap` option + +All data query results can be wrapped in a JSON object that carries metadata about the query and its results. + +This is done by adding `options=jsonwrap` to the API URL (if there are other `options`, append +`,jsonwrap` to the existing ones).
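+For example (an illustrative sketch; the endpoint and parameters are the same ones used in the example below), wrapping an existing query only changes its `options`:
+
+```bash
+# plain query - returns only the result in the requested format
+curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero'
+
+# the same query wrapped with metadata - returns a JSON object that embeds the result
+curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero,jsonwrap'
+```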
+ +This is such an object: + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero,jsonwrap' +{ + "api": 1, + "id": "system.cpu", + "name": "system.cpu", + "view_update_every": 600, + "update_every": 1, + "first_entry": 1540387074, + "last_entry": 1540647070, + "before": 1540647000, + "after": 1540644000, + "dimension_names": ["steal", "softirq", "user", "system", "iowait"], + "dimension_ids": ["steal", "softirq", "user", "system", "iowait"], + "latest_values": [0, 0.2493766, 1.745636, 0.4987531, 0], + "view_latest_values": [0.0158314, 0.0516506, 0.866549, 0.7196127, 0.0050002], + "dimensions": 5, + "points": 6, + "format": "csv", + "result": "time,steal,softirq,user,system,iowait\n2018-10-27 13:30:00,0.0158314,0.0516506,0.866549,0.7196127,0.0050002\n2018-10-27 13:20:00,0.0149856,0.0529183,0.8673155,0.7121144,0.0049979\n2018-10-27 13:10:00,0.0137501,0.053315,0.8578097,0.7197613,0.0054209\n2018-10-27 13:00:00,0.0154252,0.0554688,0.899432,0.7200638,0.0067252\n2018-10-27 12:50:00,0.0145866,0.0495922,0.8404341,0.7011141,0.0041688\n2018-10-27 12:40:00,0.0162366,0.0595954,0.8827475,0.7020573,0.0041636\n", + "min": 0, + "max": 0 +} +``` + +## Downloading data query result files + +Following the [Google Visualization Provider guidelines](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source), +Netdata supports parsing `tqx` options. + +Using these options, any Netdata data query can instruct the web browser to download +the result and save it under a given filename. + +For example, to download a CSV file with CPU utilization of the last hour, +[click here](https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:system+cpu+utilization+of+the+last_hour.csv). + +This is done by appending `&tqx=outFileName:FILENAME` to any data query. +The output will be in the format given with `&format=`. + + diff --git a/web/api/formatters/charts2json.c b/web/api/formatters/charts2json.c new file mode 100644 index 0000000..1fc20b4 --- /dev/null +++ b/web/api/formatters/charts2json.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "charts2json.h" + +// generate JSON for the /api/v1/charts API call + +const char* get_release_channel() { + static int use_stable = -1; + + if (use_stable == -1) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/.environment", netdata_configured_user_config_dir); + procfile *ff = procfile_open(filename, "=", PROCFILE_FLAG_DEFAULT); + if (ff) { + procfile_set_quotes(ff, "'\""); + ff = procfile_readall(ff); + if (ff) { + unsigned int i; + for (i = 0; i < procfile_lines(ff); i++) { + if (!procfile_linewords(ff, i)) + continue; + if (!strcmp(procfile_lineword(ff, i, 0), "RELEASE_CHANNEL")) { + if (!strcmp(procfile_lineword(ff, i, 1), "stable")) + use_stable = 1; + else if (!strcmp(procfile_lineword(ff, i, 1), "nightly")) + use_stable = 0; + break; + } + } + procfile_close(ff); + } + } + if (use_stable == -1) + use_stable = strchr(program_version, '-') ? 
0 : 1; + } + return (use_stable)?"stable":"nightly"; +} + +void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived) { + static char *custom_dashboard_info_js_filename = NULL; + size_t c, dimensions = 0, memory = 0, alarms = 0; + RRDSET *st; + + time_t now = now_realtime_sec(); + + if(unlikely(!custom_dashboard_info_js_filename)) + custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", ""); + + buffer_sprintf(wb, "{\n" + "\t\"hostname\": \"%s\"" + ",\n\t\"version\": \"%s\"" + ",\n\t\"release_channel\": \"%s\"" + ",\n\t\"os\": \"%s\"" + ",\n\t\"timezone\": \"%s\"" + ",\n\t\"update_every\": %d" + ",\n\t\"history\": %ld" + ",\n\t\"memory_mode\": \"%s\"" + ",\n\t\"custom_info\": \"%s\"" + ",\n\t\"charts\": {" + , rrdhost_hostname(host) + , rrdhost_program_version(host) + , get_release_channel() + , rrdhost_os(host) + , rrdhost_timezone(host) + , host->rrd_update_every + , host->rrd_history_entries + , rrd_memory_mode_name(host->rrd_memory_mode) + , custom_dashboard_info_js_filename + ); + + c = 0; + rrdset_foreach_read(st, host) { + if ((!show_archived && rrdset_is_available_for_viewers(st)) || (show_archived && rrdset_is_archived(st))) { + if(c) buffer_strcat(wb, ","); + buffer_strcat(wb, "\n\t\t\""); + buffer_strcat(wb, rrdset_id(st)); + buffer_strcat(wb, "\": "); + rrdset2json(st, wb, &dimensions, &memory, skip_volatile); + + c++; + st->last_accessed_time = now; + } + } + rrdset_foreach_done(st); + + RRDCALC *rc; + foreach_rrdcalc_in_rrdhost_read(host, rc) { + if(rc->rrdset) + alarms++; + } + foreach_rrdcalc_in_rrdhost_done(rc); + + buffer_sprintf(wb + , "\n\t}" + ",\n\t\"charts_count\": %zu" + ",\n\t\"dimensions_count\": %zu" + ",\n\t\"alarms_count\": %zu" + ",\n\t\"rrd_memory_bytes\": %zu" + ",\n\t\"hosts_count\": %zu" + ",\n\t\"hosts\": [" + , c + , dimensions + , alarms + , memory + , rrd_hosts_available + ); + + if(unlikely(rrd_hosts_available > 1)) { + rrd_rdlock(); + + size_t found = 0; + RRDHOST *h; + rrdhost_foreach_read(h) { + if(!rrdhost_should_be_removed(h, host, now) && !rrdhost_flag_check(h, RRDHOST_FLAG_ARCHIVED)) { + buffer_sprintf(wb + , "%s\n\t\t{" + "\n\t\t\t\"hostname\": \"%s\"" + "\n\t\t}" + , (found > 0) ? 
"," : "" + , rrdhost_hostname(h) + ); + + found++; + } + } + + rrd_unlock(); + } + else { + buffer_sprintf(wb + , "\n\t\t{" + "\n\t\t\t\"hostname\": \"%s\"" + "\n\t\t}" + , rrdhost_hostname(host) + ); + } + + buffer_sprintf(wb, "\n\t]\n}\n"); +} + +// generate collectors list for the api/v1/info call + +struct collector { + const char *plugin; + const char *module; +}; + +struct array_printer { + int c; + BUFFER *wb; +}; + +static int print_collector_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) { + struct array_printer *ap = (struct array_printer *)data; + BUFFER *wb = ap->wb; + struct collector *col=(struct collector *) entry; + if(ap->c) buffer_strcat(wb, ","); + buffer_strcat(wb, "\n\t\t{\n\t\t\t\"plugin\": \""); + buffer_strcat(wb, col->plugin); + buffer_strcat(wb, "\",\n\t\t\t\"module\": \""); + buffer_strcat(wb, col->module); + buffer_strcat(wb, "\"\n\t\t}"); + (ap->c)++; + return 0; +} + +void chartcollectors2json(RRDHOST *host, BUFFER *wb) { + DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED); + RRDSET *st; + char name[500]; + + time_t now = now_realtime_sec(); + rrdset_foreach_read(st, host) { + if (rrdset_is_available_for_viewers(st)) { + struct collector col = { + .plugin = rrdset_plugin_name(st), + .module = rrdset_module_name(st) + }; + sprintf(name, "%s:%s", col.plugin, col.module); + dictionary_set(dict, name, &col, sizeof(struct collector)); + st->last_accessed_time = now; + } + } + rrdset_foreach_done(st); + struct array_printer ap = { + .c = 0, + .wb = wb + }; + dictionary_walkthrough_read(dict, print_collector_callback, &ap); + dictionary_destroy(dict); +} diff --git a/web/api/formatters/charts2json.h b/web/api/formatters/charts2json.h new file mode 100644 index 0000000..d4b04af --- /dev/null +++ b/web/api/formatters/charts2json.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_CHARTS2JSON_H +#define NETDATA_API_FORMATTER_CHARTS2JSON_H + +#include "rrd2json.h" + +void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived); +void chartcollectors2json(RRDHOST *host, BUFFER *wb); +const char* get_release_channel(); + +#endif //NETDATA_API_FORMATTER_CHARTS2JSON_H diff --git a/web/api/formatters/csv/Makefile.am b/web/api/formatters/csv/Makefile.am new file mode 100644 index 0000000..161784b --- /dev/null +++ b/web/api/formatters/csv/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md new file mode 100644 index 0000000..df7c11e --- /dev/null +++ b/web/api/formatters/csv/README.md @@ -0,0 +1,144 @@ +<!-- +title: "CSV formatter" +custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/csv/README.md +--> + +# CSV formatter + +The CSV formatter presents [results of database queries](/web/api/queries/README.md) in the following formats: + +| format|content type|description| +| :----:|:----------:|:----------| +| `csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines| +| `csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)| +| `tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)| +| `html`|text/html|an html table| +| 
`markdown`|text/plain|markdown table| + +In all formats, the date and time are in the first column. + +The CSV formatter respects the following API `&options=`: + +| option|supported|description| +|:----:|:-------:|:----------| +| `nonzero`|yes|to return only the dimensions that have at least a non-zero value| +| `flip`|yes|to return the rows older to newer (the default is newer to older)| +| `seconds`|yes|to return the date and time as a unix timestamp| +| `ms`|yes|to return the date and time as a unix timestamp in milliseconds| +| `percent`|yes|to replace all values with their percentage over the row total| +| `abs`|yes|to turn all values positive| +| `null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)| + +## Examples + +Get the system total bandwidth for all physical network interfaces, over the last hour, +in 6 rows (one for every 10 minutes), in `csv` format: + +Netdata always returns bandwidth in `kilobits`. + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.net&format=csv&after=-3600&group=sum&points=6&options=abs' +time,received,sent +2018-10-26 23:50:00,90214.67847,215137.79762 +2018-10-26 23:40:00,90126.32286,238587.57522 +2018-10-26 23:30:00,86061.22688,213389.23526 +2018-10-26 23:20:00,85590.75164,206129.01608 +2018-10-26 23:10:00,83163.30691,194311.77384 +2018-10-26 23:00:00,85167.29657,197538.07773 +``` + +--- + +Get the max RAM used by the SQL server and any cron jobs, over the last hour, in 2 rows (one for every 30 +minutes), in `tsv` format, and format the date and time as unix timestamp: + +Netdata always returns memory in `MB`. + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=apps.mem&format=tsv&after=-3600&group=max&points=2&options=nonzero,seconds&dimensions=sql,cron' +time sql cron +1540598400 61.95703 0.25 +1540596600 61.95703 0.25 +``` + +--- + +Get an HTML table of the last 4 values (4 seconds) of system CPU utilization: + +Netdata always returns CPU utilization as `%`. + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=html&after=-4&options=nonzero' +<html> +<center> +<table border="0" cellpadding="5" cellspacing="5"> +<tr><td>time</td><td>softirq</td><td>user</td><td>system</td></tr> +<tr><td>2018-10-27 00:16:07</td><td>0.25</td><td>1</td><td>0.75</td></tr> +<tr><td>2018-10-27 00:16:06</td><td>0</td><td>1.0025063</td><td>0.5012531</td></tr> +<tr><td>2018-10-27 00:16:05</td><td>0</td><td>1</td><td>0.75</td></tr> +<tr><td>2018-10-27 00:16:04</td><td>0</td><td>1.0025063</td><td>0.7518797</td></tr> +</table> +</center> +</html> +``` + +This is how it looks when rendered by a web browser: + +![image](https://user-images.githubusercontent.com/2662304/47597887-bafbf480-d99c-11e8-864a-d880bb8d2e5b.png) + +--- + +Get a JSON array with the average bandwidth rate of the mysql server, over the last hour, in 6 values +(one every 10 minutes), and return the date and time in milliseconds: + +Netdata always returns bandwidth rates in `kilobits/s`.
+ +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=csvjsonarray&after=-3600&points=6&group=average&options=abs,ms' +[ +["time","in","out"], +[1540599600000,0.7499986,120.2810185], +[1540599000000,0.7500019,120.2815509], +[1540598400000,0.7499999,120.2812319], +[1540597800000,0.7500044,120.2819634], +[1540597200000,0.7499968,120.2807337], +[1540596600000,0.7499988,120.2810527] +] +``` + +--- + +Get the number of processes started per minute, for the last 10 minutes, in `markdown` format: + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.forks&format=markdown&after=-600&points=10&group=sum' +time | started +:---: |:---: +2018-10-27 03:52:00| 245.1706149 +2018-10-27 03:51:00| 152.6654636 +2018-10-27 03:50:00| 163.1755789 +2018-10-27 03:49:00| 176.1574766 +2018-10-27 03:48:00| 178.0137076 +2018-10-27 03:47:00| 183.8306543 +2018-10-27 03:46:00| 264.1635621 +2018-10-27 03:45:00| 205.001551 +2018-10-27 03:44:00| 7026.9852167 +2018-10-27 03:43:00| 205.9904794 +``` + +And this is how it looks when formatted: + +| time | started | +|:--:|:-----:| +| 2018-10-27 03:52:00 | 245.1706149 | +| 2018-10-27 03:51:00 | 152.6654636 | +| 2018-10-27 03:50:00 | 163.1755789 | +| 2018-10-27 03:49:00 | 176.1574766 | +| 2018-10-27 03:48:00 | 178.0137076 | +| 2018-10-27 03:47:00 | 183.8306543 | +| 2018-10-27 03:46:00 | 264.1635621 | +| 2018-10-27 03:45:00 | 205.001551 | +| 2018-10-27 03:44:00 | 7026.9852167 | +| 2018-10-27 03:43:00 | 205.9904794 | + + diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c new file mode 100644 index 0000000..603a171 --- /dev/null +++ b/web/api/formatters/csv/csv.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "libnetdata/libnetdata.h" +#include "csv.h" + +void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines) { + //info("RRD2CSV(): %s: BEGIN", r->st->id); + QUERY_TARGET *qt = r->internal.qt; + long c, i; + const long used = qt->query.used; + + // print the csv header + for(c = 0, i = 0; c < used ; c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(!i) { + buffer_strcat(wb, startline); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, "time"); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + } + buffer_strcat(wb, separator); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, string2str(qt->query.array[c].dimension.name)); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + i++; + } + buffer_strcat(wb, endline); + + if(format == DATASOURCE_CSV_MARKDOWN) { + // print the --- line after header + for(c = 0, i = 0; c < used ;c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(!i) { + buffer_strcat(wb, startline); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, ":---:"); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + } + buffer_strcat(wb, separator); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + buffer_strcat(wb, ":---:"); + if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\""); + i++; + } + buffer_strcat(wb, endline); + } + + if(!i) { 
+ // no dimensions present + return; + } + + long start = 0, end = rrdr_rows(r), step = 1; + if(!(options & RRDR_OPTION_REVERSED)) { + start = rrdr_rows(r) - 1; + end = -1; + step = -1; + } + + // for each line in the array + NETDATA_DOUBLE total = 1; + for(i = start; i != end ;i += step) { + NETDATA_DOUBLE *cn = &r->v[ i * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; + + buffer_strcat(wb, betweenlines); + buffer_strcat(wb, startline); + + time_t now = r->t[i]; + + if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) { + // print the timestamp of the line + buffer_rrd_value(wb, (NETDATA_DOUBLE)now); + // in ms + if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000"); + } + else { + // generate the local date time + struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); + if(!tm) { error("localtime() failed."); continue; } + buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); + } + + int set_min_max = 0; + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0; c < used ;c++) { + NETDATA_DOUBLE n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + set_min_max = 1; + } + + // for each dimension + for(c = 0; c < used ;c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + buffer_strcat(wb, separator); + + NETDATA_DOUBLE n = cn[c]; + + if(co[c] & RRDR_VALUE_EMPTY) { + if(options & RRDR_OPTION_NULL2ZERO) + buffer_strcat(wb, "0"); + else + buffer_strcat(wb, "null"); + } + else { + if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + n = n * 100 / total; + + if(unlikely(set_min_max)) { + r->min = r->max = n; + set_min_max = 0; + } + + if(n < r->min) r->min = n; + if(n > r->max) r->max = n; + } + + buffer_rrd_value(wb, n); + } + } + + buffer_strcat(wb, endline); + } + //info("RRD2CSV(): %s: END", r->st->id); +} diff --git a/web/api/formatters/csv/csv.h b/web/api/formatters/csv/csv.h new file mode 100644 index 0000000..666d4c6 --- /dev/null +++ b/web/api/formatters/csv/csv.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_CSV_H +#define NETDATA_API_FORMATTER_CSV_H + +#include "web/api/queries/rrdr.h" + +void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines); + +#include "../rrd2json.h" + +#endif //NETDATA_API_FORMATTER_CSV_H diff --git a/web/api/formatters/json/Makefile.am b/web/api/formatters/json/Makefile.am new file mode 100644 index 0000000..161784b --- /dev/null +++ b/web/api/formatters/json/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md new file mode 100644 index 0000000..a0f8108 --- /dev/null +++ b/web/api/formatters/json/README.md @@ -0,0 +1,156 @@ +<!-- +title: "JSON formatter" +custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/json/README.md +--> + +# JSON formatter + +The CSV formatter presents [results of database queries](/web/api/queries/README.md) in the following formats: + 
+| format | content type | description| +|:----:|:----------:|:----------| +| `json` | application/json | return the query result as a json object| +| `jsonp` | application/json | return the query result as a JSONP javascript callback| +| `datatable` | application/json | return the query result as a Google `datatable`| +| `datasource` | application/json | return the query result as a Google Visualization Provider `datasource` javascript callback| + +The CSV formatter respects the following API `&options=`: + +| option | supported | description| +|:----:|:-------:|:----------| +| `google_json` | yes | enable the Google flavor of JSON (using double quotes for strings and `Date()` function for dates| +| `objectrows` | yes | return each row as an object, instead of an array| +| `nonzero` | yes | to return only the dimensions that have at least a non-zero value| +| `flip` | yes | to return the rows older to newer (the default is newer to older)| +| `seconds` | yes | to return the date and time in unix timestamp| +| `ms` | yes | to return the date and time in unit timestamp as milliseconds| +| `percent` | yes | to replace all values with their percentage over the row total| +| `abs` | yes | to turn all values positive| +| `null2zero` | yes | to replace gaps with zeros (the default prints the string `null`| + +## Examples + +To show the differences between each format, in the following examples we query the same +chart (having just one dimension called `active`), changing only the query `format` and its `options`. + +> Using `format=json` and `options=` + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=' +{ + "labels": ["time", "active"], + "data": + [ + [ 1540644600, 224.2516667], + [ 1540644000, 229.29], + [ 1540643400, 222.41], + [ 1540642800, 226.6816667], + [ 1540642200, 246.4083333], + [ 1540641600, 241.0966667] + ] +} +``` + +> Using `format=json` and `options=objectrows` + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows' +{ + "labels": ["time", "active"], + "data": + [ + { "time": 1540644600, "active": 224.2516667}, + { "time": 1540644000, "active": 229.29}, + { "time": 1540643400, "active": 222.41}, + { "time": 1540642800, "active": 226.6816667}, + { "time": 1540642200, "active": 246.4083333}, + { "time": 1540641600, "active": 241.0966667} + ] +} +``` + +> Using `format=json` and `options=objectrows,google_json` + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&formatjson&options=objectrows,google_json' +{ + "labels": ["time", "active"], + "data": + [ + { "time": new Date(2018,9,27,12,50,0), "active": 224.2516667}, + { "time": new Date(2018,9,27,12,40,0), "active": 229.29}, + { "time": new Date(2018,9,27,12,30,0), "active": 222.41}, + { "time": new Date(2018,9,27,12,20,0), "active": 226.6816667}, + { "time": new Date(2018,9,27,12,10,0), "active": 246.4083333}, + { "time": new Date(2018,9,27,12,0,0), "active": 241.0966667} + ] +} +``` + +> Using `format=jsonp` and `options=` + +```bash +curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&formjsonp&options=' +callback({ + "labels": ["time", "active"], + "data": + [ + [ 1540645200, 235.885], + [ 1540644600, 224.2516667], + [ 1540644000, 229.29], + [ 1540643400, 222.41], + [ 
1540642800, 226.6816667], + [ 1540642200, 246.4083333] + ] +}); +``` + +> Using `format=datatable` and `options=` + +```bash +curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&formdatatable&options=' +{ + "cols": + [ + {"id":"","label":"time","pattern":"","type":"datetime"}, + {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}}, + {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}}, + {"id":"","label":"active","pattern":"","type":"number"} + ], + "rows": + [ + {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]}, + {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]}, + {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]}, + {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]}, + {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]}, + {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]} + ] +} +``` + +> Using `format=datasource` and `options=` + +```bash +curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datasource&options=' +google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig:'1540645368',table:{ + "cols": + [ + {"id":"","label":"time","pattern":"","type":"datetime"}, + {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}}, + {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}}, + {"id":"","label":"active","pattern":"","type":"number"} + ], + "rows": + [ + {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]}, + {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]}, + {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]}, + {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]}, + {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]}, + {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]} + ] +}}); +``` + + diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c new file mode 100644 index 0000000..608150c --- /dev/null +++ b/web/api/formatters/json/json.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "json.h" + +#define JSON_DATES_JS 1 +#define JSON_DATES_TIMESTAMP 2 + +void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) { + //info("RRD2JSON(): %s: BEGIN", r->st->id); + int row_annotations = 0, dates, dates_with_new = 0; + char kq[2] = "", // key quote + sq[2] = "", // string quote + pre_label[101] = "", // before each label + post_label[101] = "", // after each label + pre_date[101] = "", // the beginning of line, to the date + post_date[101] = "", // closing the date + pre_value[101] = "", // before each value + post_value[101] = "", // after each value + post_line[101] = "", // at the end of each row + normal_annotation[201] = "", // default row annotation + overflow_annotation[201] = "", // overflow row annotation + data_begin[101] = "", // between labels and values + finish[101] = "", // at the end of everything + object_rows_time[101] = ""; + + if(datatable) { + dates = JSON_DATES_JS; + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + row_annotations = 1; + snprintfz(pre_date, 100, " 
{%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq); + snprintfz(post_date, 100, "%s}", sq); + snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq); + snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq); + snprintfz(pre_value, 100, ",{%sv%s:", kq, kq); + strcpy(post_value, "}"); + strcpy(post_line, "]}"); + snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq); + strcpy(finish, "\n]\n}"); + + snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq); + snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq); + + buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq); + buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq); + buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); + buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq); + + // remove the valueobjects flag + // google wants its own keys + if(options & RRDR_OPTION_OBJECTSROWS) + options &= ~RRDR_OPTION_OBJECTSROWS; + } + else { + kq[0] = '"'; + sq[0] = '"'; + if(options & RRDR_OPTION_GOOGLE_JSON) { + dates = JSON_DATES_JS; + dates_with_new = 1; + } + else { + dates = JSON_DATES_TIMESTAMP; + dates_with_new = 0; + } + if( options & RRDR_OPTION_OBJECTSROWS ) + strcpy(pre_date, " { "); + else + strcpy(pre_date, " [ "); + strcpy(pre_label, ",\""); + strcpy(post_label, "\""); + strcpy(pre_value, ","); + if( options & RRDR_OPTION_OBJECTSROWS ) + strcpy(post_line, "}"); + else + strcpy(post_line, "]"); + snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq); + strcpy(finish, "\n]\n}"); + + buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq); + buffer_sprintf(wb, "%stime%s", sq, sq); + + if( options & RRDR_OPTION_OBJECTSROWS ) + snprintfz(object_rows_time, 100, "%stime%s: ", kq, kq); + + } + + size_t pre_value_len = strlen(pre_value); + size_t post_value_len = strlen(post_value); + size_t pre_label_len = strlen(pre_label); + size_t post_label_len = strlen(post_label); + size_t pre_date_len = strlen(pre_date); + size_t post_date_len = strlen(post_date); + size_t post_line_len = strlen(post_line); + size_t normal_annotation_len = strlen(normal_annotation); + size_t overflow_annotation_len = strlen(overflow_annotation); + size_t object_rows_time_len = strlen(object_rows_time); + + // ------------------------------------------------------------------------- + // print the JSON header + + QUERY_TARGET *qt = r->internal.qt; + long c, i; + const long used = qt->query.used; + + // print the header lines + for(c = 0, i = 0; c < used ; c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + buffer_fast_strcat(wb, pre_label, pre_label_len); + buffer_strcat(wb, string2str(qt->query.array[c].dimension.name)); + buffer_fast_strcat(wb, post_label, post_label_len); + i++; + } + + if(!i) { + buffer_fast_strcat(wb, pre_label, pre_label_len); + buffer_fast_strcat(wb, "no data", 7); + buffer_fast_strcat(wb, post_label, post_label_len); + } 
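+    // 'i' counts the dimensions that passed the HIDDEN/NONZERO filters above;
+    // it remains 0 when only the "no data" placeholder label was printed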
+ size_t total_number_of_dimensions = i; + + // print the beginning of row data + buffer_strcat(wb, data_begin); + + // if all dimensions are hidden, print a null + if(!i) { + buffer_strcat(wb, finish); + return; + } + + long start = 0, end = rrdr_rows(r), step = 1; + if(!(options & RRDR_OPTION_REVERSED)) { + start = rrdr_rows(r) - 1; + end = -1; + step = -1; + } + + // pre-allocate a large enough buffer for us + // this does not need to be accurate - it is just a hint to avoid multiple realloc(). + buffer_need_bytes(wb, + ( 20 * rrdr_rows(r)) // timestamp + json overhead + + ( (pre_value_len + post_value_len + 4) * total_number_of_dimensions * rrdr_rows(r) ) // number + ); + + // for each line in the array + NETDATA_DOUBLE total = 1; + for(i = start; i != end ;i += step) { + NETDATA_DOUBLE *cn = &r->v[ i * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; + NETDATA_DOUBLE *ar = &r->ar[ i * r->d ]; + + time_t now = r->t[i]; + + if(dates == JSON_DATES_JS) { + // generate the local date time + struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); + if(!tm) { error("localtime_r() failed."); continue; } + + if(likely(i != start)) buffer_fast_strcat(wb, ",\n", 2); + buffer_fast_strcat(wb, pre_date, pre_date_len); + + if( options & RRDR_OPTION_OBJECTSROWS ) + buffer_fast_strcat(wb, object_rows_time, object_rows_time_len); + + if(unlikely(dates_with_new)) + buffer_fast_strcat(wb, "new ", 4); + + buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); + + buffer_fast_strcat(wb, post_date, post_date_len); + + if(unlikely(row_annotations)) { + // google supports one annotation per row + int annotation_found = 0; + for(c = 0; c < used ; c++) { + if(unlikely(!(r->od[c] & RRDR_DIMENSION_SELECTED))) continue; + + if(unlikely(co[c] & RRDR_VALUE_RESET)) { + buffer_fast_strcat(wb, overflow_annotation, overflow_annotation_len); + annotation_found = 1; + break; + } + } + if(likely(!annotation_found)) + buffer_fast_strcat(wb, normal_annotation, normal_annotation_len); + } + } + else { + // print the timestamp of the line + if(likely(i != start)) + buffer_fast_strcat(wb, ",\n", 2); + + buffer_fast_strcat(wb, pre_date, pre_date_len); + + if(unlikely( options & RRDR_OPTION_OBJECTSROWS )) + buffer_fast_strcat(wb, object_rows_time, object_rows_time_len); + + buffer_rrd_value(wb, (NETDATA_DOUBLE)r->t[i]); + + // in ms + if(unlikely(options & RRDR_OPTION_MILLISECONDS)) + buffer_fast_strcat(wb, "000", 3); + + buffer_fast_strcat(wb, post_date, post_date_len); + } + + int set_min_max = 0; + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0; c < used ;c++) { + NETDATA_DOUBLE n; + if(unlikely(options & RRDR_OPTION_INTERNAL_AR)) + n = ar[c]; + else + n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + set_min_max = 1; + } + + // for each dimension + for(c = 0; c < used ;c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + NETDATA_DOUBLE n; + if(unlikely(options & RRDR_OPTION_INTERNAL_AR)) + n = ar[c]; + else + n = cn[c]; + + buffer_fast_strcat(wb, pre_value, pre_value_len); + + if(unlikely( options & RRDR_OPTION_OBJECTSROWS )) + buffer_sprintf(wb, "%s%s%s: ", kq, string2str(qt->query.array[c].dimension.name), kq); + + if(co[c] & RRDR_VALUE_EMPTY && !(options & RRDR_OPTION_INTERNAL_AR)) { + if(unlikely(options & RRDR_OPTION_NULL2ZERO)) + 
buffer_fast_strcat(wb, "0", 1); + else + buffer_fast_strcat(wb, "null", 4); + } + else { + if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + n = n * 100 / total; + + if(unlikely(set_min_max)) { + r->min = r->max = n; + set_min_max = 0; + } + + if(n < r->min) r->min = n; + if(n > r->max) r->max = n; + } + + buffer_rrd_value(wb, n); + } + + buffer_fast_strcat(wb, post_value, post_value_len); + } + + buffer_fast_strcat(wb, post_line, post_line_len); + } + + buffer_strcat(wb, finish); + //info("RRD2JSON(): %s: END", r->st->id); +} diff --git a/web/api/formatters/json/json.h b/web/api/formatters/json/json.h new file mode 100644 index 0000000..fb59e5c --- /dev/null +++ b/web/api/formatters/json/json.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_JSON_H +#define NETDATA_API_FORMATTER_JSON_H + +#include "../rrd2json.h" + +void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable); + +#endif //NETDATA_API_FORMATTER_JSON_H diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c new file mode 100644 index 0000000..8b9b752 --- /dev/null +++ b/web/api/formatters/json_wrapper.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "json_wrapper.h" + +struct value_output { + int c; + BUFFER *wb; +}; + +static int value_list_output_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) { + struct value_output *ap = (struct value_output *)data; + BUFFER *wb = ap->wb; + char *output = (char *) entry; + if(ap->c) buffer_strcat(wb, ","); + buffer_strcat(wb, output); + (ap->c)++; + return 0; +} + +static int fill_formatted_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) { + (void)ls; + DICTIONARY *dict = (DICTIONARY *)data; + char n[RRD_ID_LENGTH_MAX * 2 + 2]; + char output[RRD_ID_LENGTH_MAX * 2 + 8]; + char v[RRD_ID_LENGTH_MAX * 2 + 1]; + + sanitize_json_string(v, (char *)value, RRD_ID_LENGTH_MAX * 2); + int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\", \"%s\"]", name, v); + snprintfz(n, RRD_ID_LENGTH_MAX * 2, "%s:%s", name, v); + dictionary_set(dict, n, output, len + 1); + + return 1; +} + +void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value, + RRDR_GROUPING group_method) +{ + QUERY_TARGET *qt = r->internal.qt; + + long rows = rrdr_rows(r); + long c, i; + const long query_used = qt->query.used; + + //info("JSONWRAPPER(): %s: BEGIN", r->st->id); + char kq[2] = "", // key quote + sq[2] = ""; // string quote + + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + + buffer_sprintf(wb, "{\n" + " %sapi%s: 1,\n" + " %sid%s: %s%s%s,\n" + " %sname%s: %s%s%s,\n" + " %sview_update_every%s: %lld,\n" + " %supdate_every%s: %lld,\n" + " %sfirst_entry%s: %lld,\n" + " %slast_entry%s: %lld,\n" + " %sbefore%s: %lld,\n" + " %safter%s: %lld,\n" + " %sgroup%s: %s%s%s,\n" + " %soptions%s: %s" + , kq, kq + , kq, kq, sq, qt->id, sq + , kq, kq, sq, qt->id, sq + , kq, kq, (long long)r->update_every + , kq, kq, (long long)qt->db.minimum_latest_update_every + , kq, kq, (long long)qt->db.first_time_t + , kq, kq, (long long)qt->db.last_time_t + , kq, kq, (long long)r->before + , kq, kq, (long long)r->after + , kq, kq, sq, web_client_api_request_v1_data_group_to_string(group_method), sq + , kq, kq, sq); + + web_client_api_request_v1_data_options_to_buffer(wb, 
r->internal.query_options); + + buffer_sprintf(wb, "%s,\n %sdimension_names%s: [", sq, kq, kq); + + for(c = 0, i = 0; c < query_used ; c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + buffer_strcat(wb, sq); + buffer_strcat(wb, string2str(qt->query.array[c].dimension.name)); + buffer_strcat(wb, sq); + i++; + } + if(!i) { +#ifdef NETDATA_INTERNAL_CHECKS + error("QUERY: '%s', RRDR is empty, %zu dimensions, options is 0x%08x", qt->id, r->d, options); +#endif + rows = 0; + buffer_strcat(wb, sq); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, sq); + } + + buffer_sprintf(wb, "],\n" + " %sdimension_ids%s: [" + , kq, kq); + + for(c = 0, i = 0; c < query_used ; c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + buffer_strcat(wb, sq); + buffer_strcat(wb, string2str(qt->query.array[c].dimension.id)); + buffer_strcat(wb, sq); + i++; + } + if(!i) { + rows = 0; + buffer_strcat(wb, sq); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, sq); + } + buffer_strcat(wb, "],\n"); + + if (r->internal.query_options & RRDR_OPTION_ALL_DIMENSIONS) { + buffer_sprintf(wb, " %sfull_dimension_list%s: [", kq, kq); + + char name[RRD_ID_LENGTH_MAX * 2 + 2]; + char output[RRD_ID_LENGTH_MAX * 2 + 8]; + + struct value_output co = {.c = 0, .wb = wb}; + + DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + for (c = 0; c < (long)qt->metrics.used ;c++) { + snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s", + rrdmetric_acquired_id(qt->metrics.array[c]), + rrdmetric_acquired_name(qt->metrics.array[c])); + + int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]", + rrdmetric_acquired_id(qt->metrics.array[c]), + rrdmetric_acquired_name(qt->metrics.array[c])); + + dictionary_set(dict, name, output, len + 1); + } + dictionary_walkthrough_read(dict, value_list_output_callback, &co); + dictionary_destroy(dict); + + co.c = 0; + buffer_sprintf(wb, "],\n %sfull_chart_list%s: [", kq, kq); + dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + for (c = 0; c < (long)qt->instances.used ; c++) { + RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c]; + + snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s", + rrdinstance_acquired_id(ria), + rrdinstance_acquired_name(ria)); + + int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]", + rrdinstance_acquired_id(ria), + rrdinstance_acquired_name(ria)); + + dictionary_set(dict, name, output, len + 1); + } + dictionary_walkthrough_read(dict, value_list_output_callback, &co); + dictionary_destroy(dict); + + co.c = 0; + buffer_sprintf(wb, "],\n %sfull_chart_labels%s: [", kq, kq); + dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + for (c = 0; c < (long)qt->instances.used ; c++) { + RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c]; + rrdlabels_walkthrough_read(rrdinstance_acquired_labels(ria), fill_formatted_callback, dict); + } + dictionary_walkthrough_read(dict, value_list_output_callback, &co); + dictionary_destroy(dict); + buffer_strcat(wb, "],\n"); + } + + // functions + { + DICTIONARY *funcs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE); + RRDINSTANCE_ACQUIRED *ria = NULL; + for (c = 0; c < query_used ; 
c++) { + QUERY_METRIC *qm = &qt->query.array[c]; + if(qm->link.ria == ria) + continue; + + ria = qm->link.ria; + chart_functions_to_dict(rrdinstance_acquired_functions(ria), funcs); + } + + buffer_sprintf(wb, " %sfunctions%s: [", kq, kq); + void *t; (void)t; + dfe_start_read(funcs, t) { + const char *comma = ""; + if(t_dfe.counter) comma = ", "; + buffer_sprintf(wb, "%s%s%s%s", comma, sq, t_dfe.name, sq); + } + dfe_done(t); + dictionary_destroy(funcs); + buffer_strcat(wb, "],\n"); + } + + // context query + if (!qt->request.st) { + buffer_sprintf( + wb, + " %schart_ids%s: [", + kq, kq); + + for (c = 0, i = 0; c < query_used; c++) { + QUERY_METRIC *qm = &qt->query.array[c]; + + if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) + continue; + + if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) + continue; + + if (i) + buffer_strcat(wb, ", "); + buffer_strcat(wb, sq); + buffer_strcat(wb, string2str(qm->chart.id)); + buffer_strcat(wb, sq); + i++; + } + if (!i) { + rows = 0; + buffer_strcat(wb, sq); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, sq); + } + buffer_strcat(wb, "],\n"); + if (qt->instances.chart_label_key_pattern) { + buffer_sprintf(wb, " %schart_labels%s: { ", kq, kq); + + SIMPLE_PATTERN *pattern = qt->instances.chart_label_key_pattern; + char *label_key = NULL; + int keys = 0; + while (pattern && (label_key = simple_pattern_iterate(&pattern))) { + if (keys) + buffer_strcat(wb, ", "); + buffer_sprintf(wb, "%s%s%s : [", kq, label_key, kq); + keys++; + + for (c = 0, i = 0; c < query_used; c++) { + QUERY_METRIC *qm = &qt->query.array[c]; + + if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) + continue; + if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) + continue; + + if (i) + buffer_strcat(wb, ", "); + rrdlabels_get_value_to_buffer_or_null(rrdinstance_acquired_labels(qm->link.ria), wb, label_key, sq, "null"); + i++; + } + if (!i) { + rows = 0; + buffer_strcat(wb, sq); + buffer_strcat(wb, "no data"); + buffer_strcat(wb, sq); + } + buffer_strcat(wb, "]"); + } + buffer_strcat(wb, "},\n"); + } + } + + buffer_sprintf(wb, " %slatest_values%s: [" + , kq, kq); + + for(c = 0, i = 0; c < query_used ;c++) { + QUERY_METRIC *qm = &qt->query.array[c]; + + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + i++; + + NETDATA_DOUBLE value = rrdmetric_acquired_last_stored_value(qm->link.rma); + if (NAN == value) + buffer_strcat(wb, "null"); + else + buffer_rrd_value(wb, value); + } + if(!i) { + rows = 0; + buffer_strcat(wb, "null"); + } + + buffer_sprintf(wb, "],\n" + " %sview_latest_values%s: [" + , kq, kq); + + i = 0; + if(rows) { + NETDATA_DOUBLE total = 1; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for(c = 0; c < query_used ;c++) { + NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ]; + NETDATA_DOUBLE n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + } + + for(c = 0, i = 0; c < query_used ;c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + if(i) buffer_strcat(wb, ", "); + i++; + + NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ]; + NETDATA_DOUBLE n = cn[c]; + + 
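+            // empty slots are printed as "null" (or "0" with the null2zero option);
+            // otherwise the value is made absolute and/or converted to a percentage before printing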
if(co[c] & RRDR_VALUE_EMPTY) { + if(options & RRDR_OPTION_NULL2ZERO) + buffer_strcat(wb, "0"); + else + buffer_strcat(wb, "null"); + } + else { + if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) + n = n * 100 / total; + + buffer_rrd_value(wb, n); + } + } + } + if(!i) { + rows = 0; + buffer_strcat(wb, "null"); + } + + buffer_sprintf(wb, "],\n" + " %sdimensions%s: %ld,\n" + " %spoints%s: %ld,\n" + " %sformat%s: %s" + , kq, kq, i + , kq, kq, rows + , kq, kq, sq + ); + + rrdr_buffer_print_format(wb, format); + + buffer_sprintf(wb, "%s,\n" + " %sdb_points_per_tier%s: [ " + , sq + , kq, kq + ); + + for(size_t tier = 0; tier < storage_tiers ; tier++) + buffer_sprintf(wb, "%s%zu", tier>0?", ":"", r->internal.tier_points_read[tier]); + + buffer_strcat(wb, " ]"); + + buffer_sprintf(wb, ",\n %sresult%s: ", kq, kq); + + if(string_value) buffer_strcat(wb, sq); + //info("JSONWRAPPER(): %s: END", r->st->id); +} + +void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) { + (void)r; + (void)format; + + char kq[2] = "", // key quote + sq[2] = ""; // string quote + + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + + if(string_value) buffer_strcat(wb, sq); + + buffer_sprintf(wb, ",\n %sanomaly_rates%s: ", kq, kq); +} + +void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) { + (void)format; + + char kq[2] = "", // key quote + sq[2] = ""; // string quote + + if( options & RRDR_OPTION_GOOGLE_JSON ) { + kq[0] = '\0'; + sq[0] = '\''; + } + else { + kq[0] = '"'; + sq[0] = '"'; + } + + if(string_value) buffer_strcat(wb, sq); + + buffer_sprintf(wb, ",\n %smin%s: ", kq, kq); + buffer_rrd_value(wb, r->min); + buffer_sprintf(wb, ",\n %smax%s: ", kq, kq); + buffer_rrd_value(wb, r->max); + buffer_strcat(wb, "\n}\n"); +} diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h new file mode 100644 index 0000000..91c1475 --- /dev/null +++ b/web/api/formatters/json_wrapper.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_JSON_WRAPPER_H +#define NETDATA_API_FORMATTER_JSON_WRAPPER_H + +#include "rrd2json.h" +#include "web/api/queries/query.h" + + +void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value, + RRDR_GROUPING group_method); +void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value); +void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value); + +#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c new file mode 100644 index 0000000..8bf5471 --- /dev/null +++ b/web/api/formatters/rrd2json.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "web/api/web_api_v1.h" +#include "database/storage_engine.h" + +void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) { + rrdset2json(st, wb, NULL, NULL, 0); +} + +void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) { + switch(format) { + case DATASOURCE_JSON: + buffer_strcat(wb, DATASOURCE_FORMAT_JSON); + break; + + case DATASOURCE_DATATABLE_JSON: + buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSON); + break; + + case DATASOURCE_DATATABLE_JSONP: + buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSONP); + break; + + case 
DATASOURCE_JSONP: + buffer_strcat(wb, DATASOURCE_FORMAT_JSONP); + break; + + case DATASOURCE_SSV: + buffer_strcat(wb, DATASOURCE_FORMAT_SSV); + break; + + case DATASOURCE_CSV: + buffer_strcat(wb, DATASOURCE_FORMAT_CSV); + break; + + case DATASOURCE_TSV: + buffer_strcat(wb, DATASOURCE_FORMAT_TSV); + break; + + case DATASOURCE_HTML: + buffer_strcat(wb, DATASOURCE_FORMAT_HTML); + break; + + case DATASOURCE_JS_ARRAY: + buffer_strcat(wb, DATASOURCE_FORMAT_JS_ARRAY); + break; + + case DATASOURCE_SSV_COMMA: + buffer_strcat(wb, DATASOURCE_FORMAT_SSV_COMMA); + break; + + default: + buffer_strcat(wb, "unknown"); + break; + } +} + +int rrdset2value_api_v1( + RRDSET *st + , BUFFER *wb + , NETDATA_DOUBLE *n + , const char *dimensions + , size_t points + , time_t after + , time_t before + , RRDR_GROUPING group_method + , const char *group_options + , time_t resampling_time + , uint32_t options + , time_t *db_after + , time_t *db_before + , size_t *db_points_read + , size_t *db_points_per_tier + , size_t *result_points_generated + , int *value_is_null + , NETDATA_DOUBLE *anomaly_rate + , time_t timeout + , size_t tier + , QUERY_SOURCE query_source +) { + int ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + + ONEWAYALLOC *owa = onewayalloc_create(0); + RRDR *r = rrd2rrdr_legacy( + owa, + st, + points, + after, + before, + group_method, + resampling_time, + options, + dimensions, + group_options, + timeout, + tier, + query_source); + + if(!r) { + if(value_is_null) *value_is_null = 1; + ret = HTTP_RESP_INTERNAL_SERVER_ERROR; + goto cleanup; + } + + if(db_points_read) + *db_points_read += r->internal.db_points_read; + + if(db_points_per_tier) { + for(size_t t = 0; t < storage_tiers ;t++) + db_points_per_tier[t] += r->internal.tier_points_read[t]; + } + + if(result_points_generated) + *result_points_generated += r->internal.result_points_generated; + + if(rrdr_rows(r) == 0) { + if(db_after) *db_after = 0; + if(db_before) *db_before = 0; + if(value_is_null) *value_is_null = 1; + + ret = HTTP_RESP_BAD_REQUEST; + goto cleanup; + } + + if(wb) { + if (r->result_options & RRDR_RESULT_OPTION_RELATIVE) + buffer_no_cacheable(wb); + else if (r->result_options & RRDR_RESULT_OPTION_ABSOLUTE) + buffer_cacheable(wb); + } + + if(db_after) *db_after = r->after; + if(db_before) *db_before = r->before; + + long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0; + *n = rrdr2value(r, i, options, value_is_null, anomaly_rate); + ret = HTTP_RESP_OK; + +cleanup: + rrdr_free(owa, r); + onewayalloc_destroy(owa); + return ret; +} + +int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *latest_timestamp) { + + RRDR *r = rrd2rrdr(owa, qt); + if(!r) { + buffer_strcat(wb, "Cannot generate output with these parameters on this chart."); + return HTTP_RESP_INTERNAL_SERVER_ERROR; + } + + if (r->result_options & RRDR_RESULT_OPTION_CANCEL) { + rrdr_free(owa, r); + return HTTP_RESP_BACKEND_FETCH_FAILED; + } + + if(r->result_options & RRDR_RESULT_OPTION_RELATIVE) + buffer_no_cacheable(wb); + else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE) + buffer_cacheable(wb); + + if(latest_timestamp && rrdr_rows(r) > 0) + *latest_timestamp = r->before; + + DATASOURCE_FORMAT format = qt->request.format; + RRDR_OPTIONS options = qt->request.options; + RRDR_GROUPING group_method = qt->request.group_method; + + switch(format) { + case DATASOURCE_SSV: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method); + rrdr2ssv(r, wb, options, 
"", " ", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2ssv(r, wb, options, "", " ", ""); + } + break; + + case DATASOURCE_SSV_COMMA: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method); + rrdr2ssv(r, wb, options, "", ",", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2ssv(r, wb, options, "", ",", ""); + } + break; + + case DATASOURCE_JS_ARRAY: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method); + rrdr2ssv(r, wb, options, "[", ",", "]"); + rrdr_json_wrapper_end(r, wb, format, options, 0); + } + else { + wb->contenttype = CT_APPLICATION_JSON; + rrdr2ssv(r, wb, options, "[", ",", "]"); + } + break; + + case DATASOURCE_CSV: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method); + rrdr2csv(r, wb, format, options, "", ",", "\\n", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2csv(r, wb, format, options, "", ",", "\r\n", ""); + } + break; + + case DATASOURCE_CSV_MARKDOWN: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method); + rrdr2csv(r, wb, format, options, "", "|", "\\n", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2csv(r, wb, format, options, "", "|", "\r\n", ""); + } + break; + + case DATASOURCE_CSV_JSON_ARRAY: + wb->contenttype = CT_APPLICATION_JSON; + if(options & RRDR_OPTION_JSON_WRAP) { + rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method); + buffer_strcat(wb, "[\n"); + rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); + buffer_strcat(wb, "\n]"); + rrdr_json_wrapper_end(r, wb, format, options, 0); + } + else { + wb->contenttype = CT_APPLICATION_JSON; + buffer_strcat(wb, "[\n"); + rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n"); + buffer_strcat(wb, "\n]"); + } + break; + + case DATASOURCE_TSV: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method); + rrdr2csv(r, wb, format, options, "", "\t", "\\n", ""); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_PLAIN; + rrdr2csv(r, wb, format, options, "", "\t", "\r\n", ""); + } + break; + + case DATASOURCE_HTML: + if(options & RRDR_OPTION_JSON_WRAP) { + wb->contenttype = CT_APPLICATION_JSON; + rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method); + buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n"); + rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", ""); + buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n"); + rrdr_json_wrapper_end(r, wb, format, options, 1); + } + else { + wb->contenttype = CT_TEXT_HTML; + buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n"); + rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", ""); + buffer_strcat(wb, "</table>\n</center>\n</html>\n"); + } + break; + + case DATASOURCE_DATATABLE_JSONP: + 
wb->contenttype = CT_APPLICATION_X_JAVASCRIPT; + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method); + + rrdr2json(r, wb, options, 1); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + + case DATASOURCE_DATATABLE_JSON: + wb->contenttype = CT_APPLICATION_JSON; + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method); + + rrdr2json(r, wb, options, 1); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + + case DATASOURCE_JSONP: + wb->contenttype = CT_APPLICATION_X_JAVASCRIPT; + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method); + + rrdr2json(r, wb, options, 0); + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_end(r, wb, format, options, 0); + break; + + case DATASOURCE_JSON: + default: + wb->contenttype = CT_APPLICATION_JSON; + + if(options & RRDR_OPTION_JSON_WRAP) + rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method); + + rrdr2json(r, wb, options, 0); + + if(options & RRDR_OPTION_JSON_WRAP) { + if(options & RRDR_OPTION_RETURN_JWAR) { + rrdr_json_wrapper_anomaly_rates(r, wb, format, options, 0); + rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, 0); + } + rrdr_json_wrapper_end(r, wb, format, options, 0); + } + break; + } + + rrdr_free(owa, r); + return HTTP_RESP_OK; +} diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h new file mode 100644 index 0000000..048281d --- /dev/null +++ b/web/api/formatters/rrd2json.h @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_RRD2JSON_H +#define NETDATA_RRD2JSON_H 1 + +#include "web/api/web_api_v1.h" + +#include "web/api/exporters/allmetrics.h" +#include "web/api/queries/rrdr.h" + +#include "web/api/formatters/csv/csv.h" +#include "web/api/formatters/ssv/ssv.h" +#include "web/api/formatters/json/json.h" +#include "web/api/formatters/value/value.h" + +#include "web/api/formatters/rrdset2json.h" +#include "web/api/formatters/charts2json.h" +#include "web/api/formatters/json_wrapper.h" + +#include "web/server/web_client.h" + +#define HOSTNAME_MAX 1024 + +#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) + +// type of JSON generations +typedef enum { + DATASOURCE_JSON = 0, + DATASOURCE_DATATABLE_JSON = 1, + DATASOURCE_DATATABLE_JSONP = 2, + DATASOURCE_SSV = 3, + DATASOURCE_CSV = 4, + DATASOURCE_JSONP = 5, + DATASOURCE_TSV = 6, + DATASOURCE_HTML = 7, + DATASOURCE_JS_ARRAY = 8, + DATASOURCE_SSV_COMMA = 9, + DATASOURCE_CSV_JSON_ARRAY = 10, + DATASOURCE_CSV_MARKDOWN = 11, +} DATASOURCE_FORMAT; + +#define DATASOURCE_FORMAT_JSON "json" +#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable" +#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource" +#define DATASOURCE_FORMAT_JSONP "jsonp" +#define DATASOURCE_FORMAT_SSV "ssv" +#define DATASOURCE_FORMAT_CSV "csv" +#define DATASOURCE_FORMAT_TSV "tsv" +#define DATASOURCE_FORMAT_HTML "html" +#define DATASOURCE_FORMAT_JS_ARRAY "array" +#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma" +#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray" +#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown" + +void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb); +void rrdr_buffer_print_format(BUFFER *wb, uint32_t format); + +int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp); + +int rrdset2value_api_v1( + RRDSET *st + , BUFFER *wb + , NETDATA_DOUBLE *n + , 
const char *dimensions + , size_t points + , time_t after + , time_t before + , RRDR_GROUPING group_method + , const char *group_options + , time_t resampling_time + , uint32_t options + , time_t *db_after + , time_t *db_before + , size_t *db_points_read + , size_t *db_points_per_tier + , size_t *result_points_generated + , int *value_is_null + , NETDATA_DOUBLE *anomaly_rate + , time_t timeout + , size_t tier + , QUERY_SOURCE query_source +); + +#endif /* NETDATA_RRD2JSON_H */ diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c new file mode 100644 index 0000000..1e81063 --- /dev/null +++ b/web/api/formatters/rrdset2json.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "rrdset2json.h" + +void chart_labels2json(RRDSET *st, BUFFER *wb, size_t indentation) +{ + if(unlikely(!st->rrdlabels)) + return; + + char tabs[11]; + + if (indentation > 10) + indentation = 10; + + tabs[0] = '\0'; + while (indentation) { + strcat(tabs, "\t\t"); + indentation--; + } + + rrdlabels_to_buffer(st->rrdlabels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL); + buffer_strcat(wb, "\n"); +} + +// generate JSON for the /api/v1/chart API call + +void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile) { + time_t first_entry_t = rrdset_first_entry_t(st); + time_t last_entry_t = rrdset_last_entry_t(st); + + buffer_sprintf( + wb, + "\t\t{\n" + "\t\t\t\"id\": \"%s\",\n" + "\t\t\t\"name\": \"%s\",\n" + "\t\t\t\"type\": \"%s\",\n" + "\t\t\t\"family\": \"%s\",\n" + "\t\t\t\"context\": \"%s\",\n" + "\t\t\t\"title\": \"%s (%s)\",\n" + "\t\t\t\"priority\": %ld,\n" + "\t\t\t\"plugin\": \"%s\",\n" + "\t\t\t\"module\": \"%s\",\n" + "\t\t\t\"units\": \"%s\",\n" + "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n" + "\t\t\t\"chart_type\": \"%s\",\n", + rrdset_id(st), + rrdset_name(st), + rrdset_parts_type(st), + rrdset_family(st), + rrdset_context(st), + rrdset_title(st), + rrdset_name(st), + st->priority, + rrdset_plugin_name(st), + rrdset_module_name(st), + rrdset_units(st), + rrdset_name(st), + rrdset_type_name(st->chart_type)); + + if (likely(!skip_volatile)) + buffer_sprintf( + wb, + "\t\t\t\"duration\": %"PRId64",\n", + (int64_t)(last_entry_t - first_entry_t + st->update_every) //st->entries * st->update_every + ); + + buffer_sprintf( + wb, + "\t\t\t\"first_entry\": %"PRId64",\n", + (int64_t)first_entry_t //rrdset_first_entry_t(st) + ); + + if (likely(!skip_volatile)) + buffer_sprintf( + wb, + "\t\t\t\"last_entry\": %"PRId64",\n", + (int64_t)last_entry_t //rrdset_last_entry_t(st) + ); + + buffer_sprintf( + wb, + "\t\t\t\"update_every\": %d,\n" + "\t\t\t\"dimensions\": {\n", + st->update_every); + + unsigned long memory = sizeof(RRDSET) + st->memsize; + + size_t dimensions = 0; + RRDDIM *rd; + rrddim_foreach_read(rd, st) { + if(rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue; + + memory += sizeof(RRDDIM) + rd->memsize; + + if (dimensions) + buffer_strcat(wb, ",\n\t\t\t\t\""); + else + buffer_strcat(wb, "\t\t\t\t\""); + buffer_strcat_jsonescape(wb, rrddim_id(rd)); + buffer_strcat(wb, "\": { \"name\": \""); + buffer_strcat_jsonescape(wb, rrddim_name(rd)); + buffer_strcat(wb, "\" }"); + + dimensions++; + } + rrddim_foreach_done(rd); + + if(dimensions_count) *dimensions_count += dimensions; + if(memory_used) *memory_used += memory; + + buffer_sprintf(wb, "\n\t\t\t},\n\t\t\t\"chart_variables\": "); + health_api_v1_chart_custom_variables2json(st, wb); + + 
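+    // chart-level alarm thresholds (green/red) follow the custom chart variables;
+    // the per-chart alarms object further below is emitted only when skip_volatile is not set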
buffer_strcat(wb, ",\n\t\t\t\"green\": "); + buffer_rrd_value(wb, st->green); + buffer_strcat(wb, ",\n\t\t\t\"red\": "); + buffer_rrd_value(wb, st->red); + + if (likely(!skip_volatile)) { + buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n"); + size_t alarms = 0; + RRDCALC *rc; + netdata_rwlock_rdlock(&st->alerts.rwlock); + DOUBLE_LINKED_LIST_FOREACH_FORWARD(st->alerts.base, rc, prev, next) { + buffer_sprintf( + wb, + "%s" + "\t\t\t\t\"%s\": {\n" + "\t\t\t\t\t\"id\": %u,\n" + "\t\t\t\t\t\"status\": \"%s\",\n" + "\t\t\t\t\t\"units\": \"%s\",\n" + "\t\t\t\t\t\"update_every\": %d\n" + "\t\t\t\t}", + (alarms) ? ",\n" : "", rrdcalc_name(rc), rc->id, rrdcalc_status2string(rc->status), rrdcalc_units(rc), + rc->update_every); + + alarms++; + } + netdata_rwlock_unlock(&st->alerts.rwlock); + buffer_sprintf(wb, + "\n\t\t\t}" + ); + } + buffer_strcat(wb, ",\n\t\t\t\"chart_labels\": {\n"); + chart_labels2json(st, wb, 2); + buffer_strcat(wb, "\t\t\t}"); + + buffer_strcat(wb, ",\n\t\t\t\"functions\": {\n"); + chart_functions2json(st, wb, 4, "\"", "\""); + buffer_strcat(wb, "\t\t\t}"); + + buffer_sprintf(wb, + "\n\t\t}" + ); +} diff --git a/web/api/formatters/rrdset2json.h b/web/api/formatters/rrdset2json.h new file mode 100644 index 0000000..b2908e2 --- /dev/null +++ b/web/api/formatters/rrdset2json.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_RRDSET2JSON_H +#define NETDATA_API_FORMATTER_RRDSET2JSON_H + +#include "rrd2json.h" + +void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile); + +#endif //NETDATA_API_FORMATTER_RRDSET2JSON_H diff --git a/web/api/formatters/ssv/Makefile.am b/web/api/formatters/ssv/Makefile.am new file mode 100644 index 0000000..161784b --- /dev/null +++ b/web/api/formatters/ssv/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md new file mode 100644 index 0000000..d9e193d --- /dev/null +++ b/web/api/formatters/ssv/README.md @@ -0,0 +1,59 @@ +<!-- +title: "SSV formatter" +custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/ssv/README.md +--> + +# SSV formatter + +The SSV formatter sums all dimensions in [results of database queries](/web/api/queries/README.md) +to a single value and returns a list of such values showing how it changes through time. 
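+
+Conceptually, each row of the query result is reduced to one number (the sum of its
+dimensions) and the numbers are then joined with a prefix, a separator and a suffix.
+The minimal standalone sketch below illustrates that idea with hard-coded, illustrative
+values; it is not the implementation in `ssv.c`, and the names it uses are made up:
+
+```c
+#include <stdio.h>
+
+#define ROWS 3
+#define DIMS 2
+
+/* illustrative query result: one row per point in time, one column per dimension */
+static const double result[ROWS][DIMS] = {
+    { 1.5, 0.5 },
+    { 2.0, 1.0 },
+    { 0.8, 0.2 },
+};
+
+/* sum all dimensions of one row into a single value */
+static double row_sum(const double *row) {
+    double sum = 0;
+    for (int d = 0; d < DIMS; d++)
+        sum += row[d];
+    return sum;
+}
+
+/* print the reduced rows as: prefix value [separator value]... suffix */
+static void print_ssv(const char *prefix, const char *separator, const char *suffix) {
+    printf("%s", prefix);
+    for (int i = 0; i < ROWS; i++)
+        printf("%s%g", i ? separator : "", row_sum(result[i]));
+    printf("%s\n", suffix);
+}
+
+int main(void) {
+    print_ssv("",  " ", "");   /* ssv      -> 2 3 1   */
+    print_ssv("",  ",", "");   /* ssvcomma -> 2,3,1   */
+    print_ssv("[", ",", "]");  /* array    -> [2,3,1] */
+    return 0;
+}
+```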
+ +It supports the following formats: + +| format | content type | description | +|:----:|:----------:|:----------| +| `ssv` | text/plain | a space separated list of values | +| `ssvcomma` | text/plain | a comma separated list of values | +| `array` | application/json | a JSON array | + +The SSV formatter respects the following API `&options=`: + +| option | supported | description | +| :----:|:-------:|:----------| +| `nonzero` | yes | to return only the dimensions that have at least a non-zero value | +| `flip` | yes | to return the numbers older to newer (the default is newer to older) | +| `percent` | yes | to replace all values with their percentage over the row total | +| `abs` | yes | to turn all values positive, before using them | +| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions) | + +## Examples + +Get the average system CPU utilization of the last hour, in 6 values (one every 10 minutes): + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=ssv&after=-3600&points=6&group=average' +1.741352 1.6800467 1.769411 1.6761112 1.629862 1.6807968 +``` + +--- + +Get the total mysql bandwidth (in + out) for the last hour, in 6 values (one every 10 minutes): + +Netdata returns bandwidth in `kilobits`. + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=ssvcomma&after=-3600&points=6&group=sum&options=abs' +72618.7936215,72618.778889,72618.788084,72618.9195918,72618.7760612,72618.6712421 +``` + +--- + +Get the web server max connections for the last hour, in 12 values (one every 5 minutes) +in a JSON array: + +```bash +# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&format=array&after=-3600&points=12&group=max' +[278,258,268,239,259,260,243,266,278,318,264,258] +``` + + diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c new file mode 100644 index 0000000..d561980 --- /dev/null +++ b/web/api/formatters/ssv/ssv.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ssv.h" + +void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix) { + //info("RRD2SSV(): %s: BEGIN", r->st->id); + long i; + + buffer_strcat(wb, prefix); + long start = 0, end = rrdr_rows(r), step = 1; + if(!(options & RRDR_OPTION_REVERSED)) { + start = rrdr_rows(r) - 1; + end = -1; + step = -1; + } + + // for each line in the array + for(i = start; i != end ;i += step) { + int all_values_are_null = 0; + NETDATA_DOUBLE v = rrdr2value(r, i, options, &all_values_are_null, NULL); + + if(likely(i != start)) { + if(r->min > v) r->min = v; + if(r->max < v) r->max = v; + } + else { + r->min = v; + r->max = v; + } + + if(likely(i != start)) + buffer_strcat(wb, separator); + + if(all_values_are_null) { + if(options & RRDR_OPTION_NULL2ZERO) + buffer_strcat(wb, "0"); + else + buffer_strcat(wb, "null"); + } + else + buffer_rrd_value(wb, v); + } + buffer_strcat(wb, suffix); + //info("RRD2SSV(): %s: END", r->st->id); +} diff --git a/web/api/formatters/ssv/ssv.h b/web/api/formatters/ssv/ssv.h new file mode 100644 index 0000000..f7d4a95 --- /dev/null +++ b/web/api/formatters/ssv/ssv.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_SSV_H +#define NETDATA_API_FORMATTER_SSV_H + +#include "../rrd2json.h" + +void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix); + 
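+// The prefix/separator/suffix triplet selects the flavour of the output;
+// data_query_execute() calls this function as:
+//   rrdr2ssv(r, wb, options, "",  " ", "");   // format=ssv
+//   rrdr2ssv(r, wb, options, "",  ",", "");   // format=ssvcomma
+//   rrdr2ssv(r, wb, options, "[", ",", "]");  // format=array (a JSON array)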
+#endif //NETDATA_API_FORMATTER_SSV_H diff --git a/web/api/formatters/value/Makefile.am b/web/api/formatters/value/Makefile.am new file mode 100644 index 0000000..161784b --- /dev/null +++ b/web/api/formatters/value/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md new file mode 100644 index 0000000..a51e32d --- /dev/null +++ b/web/api/formatters/value/README.md @@ -0,0 +1,24 @@ +<!-- +title: "Value formatter" +custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/value/README.md +--> + +# Value formatter + +The Value formatter presents [results of database queries](/web/api/queries/README.md) as a single value. + +To calculate the single value to be returned, it sums the values of all dimensions. + +The Value formatter respects the following API `&options=`: + +| option | supported | description | +|:----: |:-------: |:---------- | +| `percent` | yes | to replace all values with their percentage over the row total| +| `abs` | yes | to turn all values positive, before using them | +| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions)| + +The Value formatter is not exposed by the API by itself. +Instead it is used by the [`ssv`](/web/api/formatters/ssv/README.md) formatter +and [health monitoring queries](/health/README.md). + + diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c new file mode 100644 index 0000000..46a7130 --- /dev/null +++ b/web/api/formatters/value/value.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "value.h" + + +inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate) { + QUERY_TARGET *qt = r->internal.qt; + long c; + const long used = qt->query.used; + + NETDATA_DOUBLE *cn = &r->v[ i * r->d ]; + RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; + NETDATA_DOUBLE *ar = &r->ar[ i * r->d ]; + + NETDATA_DOUBLE sum = 0, min = 0, max = 0, v; + int all_null = 1, init = 1; + + NETDATA_DOUBLE total = 1; + NETDATA_DOUBLE total_anomaly_rate = 0; + + int set_min_max = 0; + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + total = 0; + for (c = 0; c < used; c++) { + NETDATA_DOUBLE n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + total += n; + } + // prevent a division by zero + if(total == 0) total = 1; + set_min_max = 1; + } + + // for each dimension + for (c = 0; c < used; c++) { + if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue; + if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue; + + NETDATA_DOUBLE n = cn[c]; + + if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0)) + n = -n; + + if(unlikely(options & RRDR_OPTION_PERCENTAGE)) { + n = n * 100 / total; + + if(unlikely(set_min_max)) { + r->min = r->max = n; + set_min_max = 0; + } + + if(n < r->min) r->min = n; + if(n > r->max) r->max = n; + } + + if(unlikely(init)) { + if(n > 0) { + min = 0; + max = n; + } + else { + min = n; + max = 0; + } + init = 0; + } + + if(likely(!(co[c] & RRDR_VALUE_EMPTY))) { + all_null = 0; + sum += n; + } + + if(n < min) min = n; + if(n > max) max = n; + + total_anomaly_rate += ar[c]; + } + + if(anomaly_rate) { + if(!r->d) *anomaly_rate = 0; + else *anomaly_rate = total_anomaly_rate 
/ (NETDATA_DOUBLE)r->d; + } + + if(unlikely(all_null)) { + if(likely(all_values_are_null)) + *all_values_are_null = 1; + return 0; + } + else { + if(likely(all_values_are_null)) + *all_values_are_null = 0; + } + + if(options & RRDR_OPTION_MIN2MAX) + v = max - min; + else + v = sum; + + return v; +} + +QUERY_VALUE rrdmetric2value(RRDHOST *host, + struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma, + time_t after, time_t before, + RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options, + size_t tier, time_t timeout, QUERY_SOURCE query_source +) { + QUERY_TARGET_REQUEST qtr = { + .host = host, + .rca = rca, + .ria = ria, + .rma = rma, + .after = after, + .before = before, + .points = 1, + .options = options, + .group_method = group_method, + .group_options = group_options, + .tier = tier, + .timeout = timeout, + .query_source = query_source, + }; + + ONEWAYALLOC *owa = onewayalloc_create(16 * 1024); + RRDR *r = rrd2rrdr(owa, query_target_create(&qtr)); + + QUERY_VALUE qv; + + if(!r || rrdr_rows(r) == 0) { + qv = (QUERY_VALUE) { + .value = NAN, + .anomaly_rate = NAN, + }; + } + else { + qv = (QUERY_VALUE) { + .after = r->after, + .before = r->before, + .points_read = r->internal.db_points_read, + .result_points = r->internal.result_points_generated, + }; + + for(size_t t = 0; t < storage_tiers ;t++) + qv.storage_points_per_tier[t] = r->internal.tier_points_read[t]; + + long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0; + int all_values_are_null = 0; + qv.value = rrdr2value(r, i, options, &all_values_are_null, &qv.anomaly_rate); + if(all_values_are_null) { + qv.value = NAN; + qv.anomaly_rate = NAN; + } + } + + rrdr_free(owa, r); + onewayalloc_destroy(owa); + + return qv; +} diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h new file mode 100644 index 0000000..76b1869 --- /dev/null +++ b/web/api/formatters/value/value.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_API_FORMATTER_VALUE_H +#define NETDATA_API_FORMATTER_VALUE_H + +#include "../rrd2json.h" + +typedef struct storage_value { + NETDATA_DOUBLE value; + NETDATA_DOUBLE anomaly_rate; + time_t after; + time_t before; + size_t points_read; + size_t storage_points_per_tier[RRD_STORAGE_TIERS]; + size_t result_points; +} QUERY_VALUE; + +struct rrdmetric_acquired; +struct rrdinstance_acquired; +struct rrdcontext_acquired; + +QUERY_VALUE rrdmetric2value(RRDHOST *host, + struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma, + time_t after, time_t before, + RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options, + size_t tier, time_t timeout, QUERY_SOURCE query_source +); + +NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate); + +#endif //NETDATA_API_FORMATTER_VALUE_H |
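The `abs`, `percent` and `min2max` options accepted by the value formatter all act on the single-row reduction performed by `rrdr2value()` above. The following standalone sketch shows only the option semantics with illustrative values; it is deliberately simplified and is not the netdata implementation, which additionally skips hidden/empty dimensions and updates `r->min`/`r->max`:

```c
#include <math.h>
#include <stdio.h>

/* illustrative values of one query row: one entry per dimension */
static const double row[] = { 4.0, -1.0, 2.0 };
static const int dims = 3;

/* reduce one row according to the value formatter options described above:
 *   absolute - use absolute values (option "abs")
 *   percent  - replace each value with its share of the row total (option "percent")
 *   min2max  - return max - min across dimensions instead of their sum (option "min2max") */
static double reduce(int absolute, int percent, int min2max) {
    double sum = 0, min = 0, max = 0, total = 0;
    int init = 1;

    if (percent) {
        for (int c = 0; c < dims; c++)
            total += absolute ? fabs(row[c]) : row[c];
        if (total == 0) total = 1;   /* prevent a division by zero */
    }

    for (int c = 0; c < dims; c++) {
        double n = absolute ? fabs(row[c]) : row[c];
        if (percent) n = n * 100 / total;

        if (init) { min = max = n; init = 0; }
        if (n < min) min = n;
        if (n > max) max = n;
        sum += n;
    }

    return min2max ? max - min : sum;
}

int main(void) {
    printf("default (sum) : %g\n", reduce(0, 0, 0));  /* 4 - 1 + 2 = 5            */
    printf("abs           : %g\n", reduce(1, 0, 0));  /* 4 + 1 + 2 = 7            */
    printf("abs,percent   : %g\n", reduce(1, 1, 0));  /* shares always sum to 100 */
    printf("min2max       : %g\n", reduce(0, 0, 1));  /* 4 - (-1)  = 5            */
    return 0;
}
```

With `abs` applied first, the percentage shares of a single row always add up to 100, which is why the third line prints `100`.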