author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 11:19:16 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:53:24 +0000
commit     b5f8ee61a7f7e9bd291dd26b0585d03eb686c941 (patch)
tree       d4d31289c39fc00da064a825df13a0b98ce95b10 /src/web/server
parent     Adding upstream version 1.44.3. (diff)
Adding upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/web/server')
-rw-r--r--  src/web/server/README.md                    271
-rw-r--r--  src/web/server/h2o/connlist.c               157
-rw-r--r--  src/web/server/h2o/connlist.h                30
-rw-r--r--  src/web/server/h2o/h2o_utils.c               60
-rw-r--r--  src/web/server/h2o/h2o_utils.h               38
-rw-r--r--  src/web/server/h2o/http_server.c            421
-rw-r--r--  src/web/server/h2o/http_server.h             15
-rw-r--r--  src/web/server/h2o/streaming.c              384
-rw-r--r--  src/web/server/h2o/streaming.h               66
-rw-r--r--  src/web/server/static/README.md              21
-rw-r--r--  src/web/server/static/static-threaded.c     562
-rw-r--r--  src/web/server/static/static-threaded.h      10
-rw-r--r--  src/web/server/web_client.c                2081
-rw-r--r--  src/web/server/web_client.h                 276
-rw-r--r--  src/web/server/web_client_cache.c           151
-rw-r--r--  src/web/server/web_client_cache.h            15
-rw-r--r--  src/web/server/web_server.c                 151
-rw-r--r--  src/web/server/web_server.h                  63
18 files changed, 4772 insertions, 0 deletions
diff --git a/src/web/server/README.md b/src/web/server/README.md
new file mode 100644
index 000000000..e40640875
--- /dev/null
+++ b/src/web/server/README.md
@@ -0,0 +1,271 @@
+# Web server
+
+The Netdata web server is `static-threaded`, with a fixed, configurable number of threads.
+
+All the threads are concurrently listening for web requests on the same sockets, and the kernel distributes the incoming
+requests to them. Each thread uses non-blocking I/O so it can serve any number of web requests in parallel.
+
+This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
+
+## Configuration
+
+From within your Netdata config directory (typically `/etc/netdata`), [use `edit-config`](/docs/netdata-agent/configuration/README.md) to
+open `netdata.conf`.
+
+```
+sudo ./edit-config netdata.conf
+```
+
+Scroll down to the `[web]` section to find the following settings.
+
+## Settings
+
+| Setting | Default | Description |
+|:-------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `ssl key` | `/etc/netdata/ssl/key.pem` | Declare the location of an SSL key to [enable HTTPS](#enable-httpstls-support). |
+| `ssl certificate` | `/etc/netdata/ssl/cert.pem` | Declare the location of an SSL certificate to [enable HTTPS](#enable-httpstls-support). |
+| `tls version` | `1.3` | Choose which TLS version to use. While all versions are allowed (`1` or `1.0`, `1.1`, `1.2` and `1.3`), we recommend `1.3` for the most secure encryption. If left blank, Netdata uses the highest available protocol version on your system. |
+| `tls ciphers` | `none` | Choose which TLS cipher to use. Options include `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`, and `TLS_AES_128_GCM_SHA256`. If left blank, Netdata uses the default cipher list for that protocol provided by your TLS implementation. |
+| `ses max window` | `15` | See [single exponential smoothing](/src/web/api/queries/ses/README.md). |
+| `des max window` | `15` | See [double exponential smoothing](/src/web/api/queries/des/README.md). |
+| `mode`                                     | `static-threaded`                                                                                                                                                                          | Turns the static-threaded web server on (`static-threaded`) or off (`none`). See the [example](#disable-the-web-server) to turn off the web server and disable the dashboard.                                                                                                                                                                                                                                                                         |
+| `listen backlog` | `4096` | The port backlog. Check `man 2 listen`. |
+| `default port` | `19999` | The listen port for the static web server. |
+| `web files owner`                          | `netdata`                                                                                                                                                                                  | The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the given user is not found, Netdata will only serve files owned by the user given in `run as user`.                                                                                                                                                                                         |
+| `web files group` | `netdata` | If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not. |
+| `disconnect idle clients after seconds` | `60` | The time in seconds to disconnect web clients after being totally idle. |
+| `timeout for first request` | `60` | How long to wait for a client to send a request before closing the socket. Prevents slow request attacks. |
+| `accept a streaming request every seconds` | `0` | Can be used to set a limit on how often a parent node will accept streaming requests from child nodes in a [streaming and replication setup](/src/streaming/README.md). |
+| `respect do not track policy` | `no` | If set to `yes`, Netdata will respect the user's browser preferences for [Do Not Track](https://www.eff.org/issues/do-not-track) (DNT) and storing cookies. If DNT is _enabled_ in the browser, and this option is set to `yes`, nodes will not connect to any [registry](/src/registry/README.md). For certain browsers, users must disable DNT and change this option to `yes` for full functionality. |
+| `x-frame-options response header` | ` ` | Avoid [clickjacking attacks](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options), by ensuring that the content is not embedded into other sites. |
+| `allow connections from`                   | `localhost *`                                                                                                                                                                              | Declare which IP addresses or fully-qualified domain names (FQDNs) are allowed to connect to the web server, including the [dashboard](/docs/dashboards-and-charts/README.md) or [HTTP API](/src/web/api/README.md). This is a global setting with higher priority than any of the ones below.                                                                                                                                                        |
+| `allow connections by dns` | `heuristic` | See the [access list examples](#access-lists) for details on using `allow` settings. |
+| `allow dashboard from` | `localhost *` | |
+| `allow dashboard by dns` | `heuristic` | |
+| `allow badges from` | `*` | |
+| `allow badges by dns` | `heuristic` | |
+| `allow streaming from` | `*` | |
+| `allow streaming by dns` | `heuristic` | |
+| `allow netdata.conf` | `localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.* UNKNOWN` | |
+| `allow netdata.conf by dns` | `no` | |
+| `allow management from` | `localhost` | |
+| `allow management by dns` | `heuristic` | |
+| `enable gzip compression` | `yes` | When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses. |
+| `gzip compression strategy` | `default` | Valid settings are `default`, `filtered`, `huffman only`, `rle` and `fixed`. |
+| `gzip compression level` | `3` | Valid settings are 1 (fastest) to 9 (best ratio). |
+| `web server threads`                       | ` `                                                                                                                                                                                        | How many processor threads the web server is allowed to use. The default is system-specific: the lower of `6` and the number of CPU cores.                                                                                                                                                                                                                                                                                                            |
+| `web server max sockets` | ` ` | Available sockets. The default is system-specific, automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection. |
+| `custom dashboard_info.js`                 | ` `                                                                                                                                                                                        | Specifies the location of a custom `dashboard_info.js` file. See [customizing the standard dashboard](/docs/developer-and-contributor-corner/customize.md#customize-the-standard-dashboard) for details.                                                                                                                                                                                                                                              |
+
+## Examples
+
+### Disable the web server
+
+Disable the web server by editing `netdata.conf` and setting:
+
+```
+[web]
+ mode = none
+```
+
+### Change the number of threads
+
+Control the number of threads and sockets with the following settings:
+
+```
+[web]
+ web server threads = 4
+ web server max sockets = 512
+```
+
+### Binding Netdata to multiple ports
+
+Netdata can bind to multiple IPs and ports, offering access to different services on each. Up to 100 sockets can be used (increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`).
+
+The ports to bind are controlled via `[web].bind to`, like this:
+
+```
+[web]
+ default port = 19999
+ bind to = 127.0.0.1=dashboard^SSL=optional 10.1.1.1:19998=management|netdata.conf hostname:19997=badges [::]:19996=streaming^SSL=force localhost:19995=registry *:http=dashboard unix:/run/netdata/netdata.sock
+```
+
+Using the above, Netdata will bind to:
+
+- IPv4 127.0.0.1 at port 19999 (the port is taken from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. Both HTTP and HTTPS requests will be accepted.
+- IPv4 10.1.1.1 at port 19998. The management API and `netdata.conf` will be accessible on this port.
+- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port.
+- All IPv6 IPs at port 19996. Only metric streaming requests from other Netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. child nodes also need to be [configured for TLS](/src/streaming/README.md)).
+- All the IPs `localhost` resolves to (both IPv4 and IPv6, depending on the resolved IPs) at port 19995. This port will only accept registry API requests.
+- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port.
+- Unix domain socket `/run/netdata/netdata.sock`. All requests are serviceable on this socket. Note that in some OSs like Fedora, every service sees a different `/tmp`, so don't create a Unix socket under `/tmp`. `/run` or `/var/run` is suggested.
+
+The option `[web].default port` is used when an entry in `[web].bind to` does not specify a port.
+
+Note that the access permissions specified with the `=request type|request type|...` format are available from version 1.12 onwards.
+As shown in the example above, these permissions are optional, with the default being to permit all request types on the specified port.
+The request types are strings identical to the `allow X from` directives of the access lists, i.e. `dashboard`, `streaming`, `registry`, `netdata.conf`, `badges` and `management`.
+The access lists themselves and the general setting `allow connections from` in the next section are applied regardless of the ports that are configured to provide these services.
+The API requests are serviced as follows:
+
+- `dashboard` gives access to the UI, the read API and badges API calls.
+- `badges` gives access only to the badges API calls.
+- `management` gives access only to the management API calls.
+
+### Enable HTTPS/TLS support
+
+Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data to a
+parent from its child nodes, via the TLS protocol.
+
+Inbound Unix socket connections are unaffected, regardless of the TLS settings.
+
+> While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol,
+> it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata
+> itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS`
+> or `TLS/SSL`.
+
+To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`:
+
+```conf
+[web]
+ ssl key = /etc/netdata/ssl/key.pem
+ ssl certificate = /etc/netdata/ssl/cert.pem
+```
+
+Both files must be readable by the `netdata` user. If either of these files does not exist or is unreadable, Netdata will fall back to HTTP. For a parent-child connection, only the parent needs these settings.
+
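+A minimal sketch of making both files readable by the `netdata` user, assuming the default key and certificate paths shown above (adapt the paths and the group to your setup):
+
+```bash
+sudo chown netdata:netdata /etc/netdata/ssl/key.pem /etc/netdata/ssl/cert.pem
+sudo chmod 640 /etc/netdata/ssl/key.pem /etc/netdata/ssl/cert.pem
+```
+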
+For test purposes, generate self-signed certificates with the following command:
+
+```bash
+openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem
+```
+
+> If you use 4096 bits for your key and the certificate, Netdata will need more CPU to process the communication.
+> `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. Verify the difference
+> by running:
+>
+> ```sh
+> openssl speed rsa2048 rsa4096
+> ```
+
+### Select TLS version
+
+Beginning with version `v1.21.0`, you can specify the TLS version and the ciphers that you want to use:
+
+```conf
+[web]
+ tls version = 1.3
+ tls ciphers = TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256
+```
+
+If you do not specify these options, Netdata will use the highest available protocol version on your system and the default cipher list for that protocol provided by your TLS implementation.
+
+#### TLS/SSL enforcement
+
+When certificates are defined and no other options are provided, a Netdata server will:
+
+- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, `netdata.conf` and badges.
+- Allow incoming child connections to use both unencrypted and encrypted communications for streaming.
+
+To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming.
+
+| SSL setting | HTTP requests|HTTPS requests|Unencrypted Streams|Encrypted Streams|
+|:---------:|:-----------:|:------------:|:-----------------:|:----------------|
+| `none` | Redirected to HTTPS|Accepted|Accepted|Accepted|
+| `force`| Redirected to HTTPS|Accepted|Denied|Accepted|
+| `optional`| Accepted|Accepted|Accepted|Accepted|
+
+Example:
+
+```
+[web]
+ bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force
+```
+
+For information on how to configure the child to use TLS, check [securing the communication](/src/streaming/README.md#securing-streaming-communications) in the streaming documentation. There you will find additional details on the expected behavior of client and server nodes when their respective TLS options are enabled.
+
+When you define SSL behavior for different ports, Netdata will apply the behavior specified for each port individually. For example, using the configuration line below:
+
+```
+[web]
+ bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force *:20000=netdata.conf^SSL=optional *:20001=dashboard|registry
+```
+
+Netdata will:
+
+- Force all HTTP requests to the default port to be redirected to HTTPS (same port).
+- Refuse unencrypted streaming connections from child nodes on the default port.
+- Allow both HTTP and HTTPS requests to port 20000 for `netdata.conf`.
+- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
+
+#### TLS/SSL errors
+
+When you start using Netdata with TLS, you may find errors in the Netdata log, which is stored at `/var/log/netdata/error.log` by default.
+
+Most of the time, these errors are due to incompatibilities between your browser's options related to TLS/SSL protocols and Netdata's internal configuration. The most common error is `error:00000006:lib(0):func(0):EVP lib`.
+
+In the near future, Netdata will allow our users to change the internal configuration to avoid similar errors. Until then, we're recommending only the most common and safe encryption protocols listed above.
+
+### Access lists
+
+Netdata supports access lists in `netdata.conf`:
+
+```
+[web]
+ allow connections from = localhost *
+ allow dashboard from = localhost *
+ allow badges from = *
+ allow streaming from = *
+ allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*
+ allow management from = localhost
+```
+
+The patterns are string matches (`*` is a wildcard) applied to the IPs or FQDNs of the clients.
+
+- `allow connections from` matches anyone who connects on the Netdata port(s).
+  So, if a client is not allowed, it will be connected and disconnected immediately, without even
+  a single byte being read from its connection. This is a global setting with higher priority than any of the ones below.
+
+- `allow dashboard from` receives the request and checks whether it is for a static dashboard file or one of the API calls the
+  dashboards make.
+
+- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`.
+
+- `allow streaming from` checks whether a child node that wants to stream metrics to this Netdata agent is allowed to do so.
+ This can be controlled per API KEY and MACHINE GUID in `stream.conf`.
+ The setting in `netdata.conf` is checked before the ones in `stream.conf`.
+
+- `allow netdata.conf from` checks the client IP to allow access to `http://netdata.host:19999/netdata.conf`.
+  The IPs listed cover all the private IPv4 ranges, plus `fd*` private IPv6 addresses. Keep in mind that connections to Netdata API ports are filtered by `allow connections from`. So, IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`.
+
+- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported for [health](/src/web/api/health/README.md#health-management-api).
+
+In order to check the FQDN of the connection without opening the Netdata Agent to DNS spoofing, a reverse DNS record
+must be set up for the connecting host. At connection time the reverse DNS of the peer IP address is resolved, and
+a forward DNS resolution is made to validate the IP address against the name pattern.
+
+Please note that this process can be expensive on a machine that is serving many connections. Each access list has an
+associated configuration option to turn off DNS-based patterns completely to avoid incurring this cost at run-time:
+
+```
+ allow connections by dns = heuristic
+ allow dashboard by dns = heuristic
+ allow badges by dns = heuristic
+ allow streaming by dns = heuristic
+ allow netdata.conf by dns = no
+ allow management by dns = heuristic
+```
+
+The three possible values for each of these options are `yes`, `no` and `heuristic`. The `heuristic` option disables
+the check when the pattern only contains IPv4/IPv6 addresses or `localhost`, and enables it when wildcards are
+present that may match DNS FQDNs.
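+
+For example, with a hypothetical access list like the one below (using `*.example.com` as a placeholder domain), `heuristic` skips the DNS check for `localhost` and the `10.*` IP pattern, but resolves and verifies client names against the FQDN wildcard:
+
+```
+[web]
+    allow dashboard from = localhost 10.* *.example.com
+    allow dashboard by dns = heuristic
+```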
+
+## DDoS protection
+
+If you publish your Netdata web server to the internet, you may want to apply some protection against DDoS:
+
+1. Use the `static-threaded` web server (it is the default)
+2. Use a reasonable `[web].web server max sockets` (the default is automatically set to 50% of the file descriptors Netdata is allowed to use)
+3. Don't use all your CPU cores for Netdata (lower `[web].web server threads`)
+4. Run the `netdata` process with a low process scheduling priority (the default is the lowest)
+5. If possible, proxy Netdata via a full-featured web server (Nginx, Apache, etc.); a minimal Nginx sketch follows below
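+
+As an illustration of the last point, a minimal Nginx sketch that proxies Netdata under `/netdata/` (the server name and listen address are placeholders, and this is not a complete or hardened configuration):
+
+```
+server {
+    listen 80;
+    server_name netdata.example.com;
+
+    location /netdata/ {
+        proxy_pass http://127.0.0.1:19999/;
+        proxy_set_header Host $host;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    }
+}
+```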
diff --git a/src/web/server/h2o/connlist.c b/src/web/server/h2o/connlist.c
new file mode 100644
index 000000000..5df12b86b
--- /dev/null
+++ b/src/web/server/h2o/connlist.c
@@ -0,0 +1,157 @@
+#include "libnetdata/libnetdata.h"
+#include "connlist.h"
+
+conn_list_t conn_list = { NULL, NULL, 0, 0, PTHREAD_MUTEX_INITIALIZER };
+
+static h2o_stream_conn_t **conn_list_get_null_element_unsafe(conn_list_t *list)
+{
+ struct conn_list_leaf *leaf = list->head;
+ while (leaf != NULL) {
+ for (int i = 0; i < CONN_LIST_MEMPOOL_SIZE; i++) {
+ if (leaf->conn[i] == NULL)
+ return &leaf->conn[i];
+ }
+ leaf = leaf->next;
+ }
+ return NULL;
+}
+
+void conn_list_insert(conn_list_t *list, h2o_stream_conn_t *conn)
+{
+ pthread_mutex_lock(&list->lock);
+
+ // in case the allocated capacity is not used up
+ // we can reuse the null element
+ if (list->capacity != list->size) {
+ h2o_stream_conn_t **null_element = conn_list_get_null_element_unsafe(list);
+ if (unlikely(null_element == NULL)) {
+ pthread_mutex_unlock(&list->lock);
+ error_report("conn_list_insert: capacity != size but no null element found");
+ return;
+ }
+ *null_element = conn;
+ list->size++;
+ pthread_mutex_unlock(&list->lock);
+ return;
+ }
+
+ // if not, we need to allocate a new leaf
+ struct conn_list_leaf *old_tail = list->tail;
+ list->tail = callocz(1, sizeof(struct conn_list_leaf));
+ if (unlikely(old_tail == NULL))
+ list->head = list->tail;
+ else
+ old_tail->next = list->tail;
+
+ list->tail->conn[0] = conn;
+ list->size++;
+ list->capacity += CONN_LIST_MEMPOOL_SIZE;
+
+ pthread_mutex_unlock(&list->lock);
+}
+
+typedef struct {
+ conn_list_t *list;
+ struct conn_list_leaf *leaf;
+ int idx;
+} conn_list_iter_t;
+
+static inline void conn_list_iter_create_unsafe(conn_list_iter_t *iter, conn_list_t *list)
+{
+ iter->list = list;
+ iter->leaf = list->head;
+ iter->idx = 0;
+}
+
+static inline int conn_list_iter_next_unsafe(conn_list_iter_t *iter, h2o_stream_conn_t **conn)
+{
+ if (unlikely(iter->idx == iter->list->capacity))
+ return 0;
+
+ if (iter->idx && iter->idx % CONN_LIST_MEMPOOL_SIZE == 0) {
+ iter->leaf = iter->leaf->next;
+ }
+
+ *conn = iter->leaf->conn[iter->idx++ % CONN_LIST_MEMPOOL_SIZE];
+ return 1;
+}
+
+void conn_list_iter_all(conn_list_t *list, void (*cb)(h2o_stream_conn_t *conn))
+{
+ pthread_mutex_lock(&list->lock);
+ conn_list_iter_t iter;
+ conn_list_iter_create_unsafe(&iter, list);
+ h2o_stream_conn_t *conn;
+ while (conn_list_iter_next_unsafe(&iter, &conn)) {
+ if (conn == NULL)
+ continue;
+ cb(conn);
+ }
+ pthread_mutex_unlock(&list->lock);
+}
+
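+// When more than one mempool block worth of slots is unused, compact any live
+// connections out of the tail leaf and free it, shrinking the list capacity.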
+static void conn_list_garbage_collect_unsafe(conn_list_t *list)
+{
+ if (list->capacity - list->size > CONN_LIST_MEMPOOL_SIZE) {
+ struct conn_list_leaf *new_tail = list->head;
+ while (new_tail->next != list->tail)
+ new_tail = new_tail->next;
+
+ // check if the tail leaf is empty and move the data if not
+ for (int i = 0; i < CONN_LIST_MEMPOOL_SIZE; i++) {
+ if (list->tail->conn[i] != NULL) {
+ h2o_stream_conn_t **null_element = conn_list_get_null_element_unsafe(list);
+ if (unlikely(null_element == NULL)) {
+ error_report("conn_list_garbage_collect_unsafe: list->capacity - list->size > CONN_LIST_MEMPOOL_SIZE but no null element found?");
+ return;
+ }
+ *null_element = list->tail->conn[i];
+ list->tail->conn[i] = NULL;
+ }
+ }
+
+ freez(list->tail);
+ new_tail->next = NULL;
+ list->tail = new_tail;
+ list->capacity -= CONN_LIST_MEMPOOL_SIZE;
+ }
+}
+
+static inline int conn_list_iter_remove(conn_list_iter_t *iter, h2o_stream_conn_t *conn)
+{
+ if (unlikely(iter->idx == iter->list->capacity))
+ return -1;
+
+ if (iter->idx && iter->idx % CONN_LIST_MEMPOOL_SIZE == 0) {
+ iter->leaf = iter->leaf->next;
+ }
+
+ if(conn == iter->leaf->conn[iter->idx % CONN_LIST_MEMPOOL_SIZE]) {
+ iter->leaf->conn[iter->idx % CONN_LIST_MEMPOOL_SIZE] = NULL;
+
+ iter->idx++;
+ return 1;
+ }
+
+ iter->idx++;
+ return 0;
+}
+
+int conn_list_remove_conn(conn_list_t *list, h2o_stream_conn_t *conn)
+{
+ pthread_mutex_lock(&list->lock);
+ conn_list_iter_t iter;
+ conn_list_iter_create_unsafe(&iter, list);
+ int rc;
+ while (!(rc = conn_list_iter_remove(&iter, conn)));
+ if (rc == -1) {
+ pthread_mutex_unlock(&list->lock);
+ error_report("conn_list_remove_conn: conn not found");
+ return 0;
+ }
+ list->size--;
+ conn_list_garbage_collect_unsafe(list);
+ pthread_mutex_unlock(&list->lock);
+ return 1;
+}
+
diff --git a/src/web/server/h2o/connlist.h b/src/web/server/h2o/connlist.h
new file mode 100644
index 000000000..8848b85be
--- /dev/null
+++ b/src/web/server/h2o/connlist.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef HTTPD_CONNLIST_H
+#define HTTPD_CONNLIST_H
+
+#include "streaming.h"
+
+// the (-1) in the following macro keeps the conn list + next pointer
+// at a total size that is a power of 2
+#define CONN_LIST_MEMPOOL_SIZE ((1 << 5) - 1)
+struct conn_list_leaf {
+ h2o_stream_conn_t *conn[CONN_LIST_MEMPOOL_SIZE];
+ struct conn_list_leaf *next;
+};
+
+typedef struct {
+ struct conn_list_leaf *head;
+ struct conn_list_leaf *tail;
+ int size;
+ int capacity;
+ pthread_mutex_t lock;
+} conn_list_t;
+
+extern conn_list_t conn_list;
+
+void conn_list_insert(conn_list_t *list, h2o_stream_conn_t *conn);
+void conn_list_iter_all(conn_list_t *list, void (*cb)(h2o_stream_conn_t *conn));
+int conn_list_remove_conn(conn_list_t *list, h2o_stream_conn_t *conn);
+
+#endif /* HTTPD_CONNLIST_H */
diff --git a/src/web/server/h2o/h2o_utils.c b/src/web/server/h2o/h2o_utils.c
new file mode 100644
index 000000000..3038b6156
--- /dev/null
+++ b/src/web/server/h2o/h2o_utils.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "h2o_utils.h"
+
+#include "h2o/string_.h"
+
+#include "libnetdata/libnetdata.h"
+
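+// Returns a NUL-terminated heap copy of the iovec contents; the caller must freez() the result.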
+char *iovec_to_cstr(h2o_iovec_t *str)
+{
+ char *c_str = mallocz(str->len + 1);
+ memcpy(c_str, str->base, str->len);
+ c_str[str->len] = 0;
+ return c_str;
+}
+
+#define KEY_VAL_BUFFER_GROWTH_STEP 5
+h2o_iovec_pair_vector_t *parse_URL_params(h2o_mem_pool_t *pool, h2o_iovec_t params_string)
+{
+ h2o_iovec_pair_vector_t *params_vec = h2o_mem_alloc_shared(pool, sizeof(h2o_iovec_pair_vector_t), NULL);
+ memset(params_vec, 0, sizeof(h2o_iovec_pair_vector_t));
+
+ h2o_iovec_pair_t param;
+ while ((param.name.base = (char*)h2o_next_token(&params_string, '&', &param.name.len, &param.value)) != NULL) {
+ if (params_vec->capacity == params_vec->size)
+ h2o_vector_reserve(pool, params_vec, params_vec->capacity + KEY_VAL_BUFFER_GROWTH_STEP);
+
+ params_vec->entries[params_vec->size++] = param;
+ }
+
+ return params_vec;
+}
+
+h2o_iovec_pair_t *get_URL_param_by_name(h2o_iovec_pair_vector_t *params_vec, const void *needle, size_t needle_len)
+{
+ for (size_t i = 0; i < params_vec->size; i++) {
+ h2o_iovec_pair_t *ret = &params_vec->entries[i];
+ if (h2o_memis(ret->name.base, ret->name.len, needle, needle_len))
+ return ret;
+ }
+ return NULL;
+}
+
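+// Decodes %XX URL escapes into a newly allocated copy of the string; the caller must freez() the result.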
+char *url_unescape(const char *url)
+{
+ char *result = mallocz(strlen(url) + 1);
+
+ int i, j;
+ for (i = 0, j = 0; url[i] != 0; i++, j++) {
+ if (url[i] == '%' && isxdigit(url[i+1]) && isxdigit(url[i+2])) {
+ char hex[3] = { url[i+1], url[i+2], 0 };
+ result[j] = strtol(hex, NULL, 16);
+ i += 2;
+ } else
+ result[j] = url[i];
+ }
+ result[j] = 0;
+
+ return result;
+}
diff --git a/src/web/server/h2o/h2o_utils.h b/src/web/server/h2o/h2o_utils.h
new file mode 100644
index 000000000..6760ed9a9
--- /dev/null
+++ b/src/web/server/h2o/h2o_utils.h
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_H2O_UTILS_H
+#define NETDATA_H2O_UTILS_H
+
+#include "h2o/memory.h"
+
+#define __HAS_URL_PARAMS(reqptr) ((reqptr)->query_at != SIZE_MAX && ((reqptr)->path.len - (reqptr)->query_at > 1))
+#define IF_HAS_URL_PARAMS(reqptr) if __HAS_URL_PARAMS(reqptr)
+#define UNLESS_HAS_URL_PARAMS(reqptr) if (!__HAS_URL_PARAMS(reqptr))
+#define URL_PARAMS_IOVEC_INIT(reqptr) { .base = &(reqptr)->path.base[(reqptr)->query_at + 1], \
+ .len = (reqptr)->path.len - (reqptr)->query_at - 1 }
+#define URL_PARAMS_IOVEC_INIT_WITH_QUESTIONMARK(reqptr) { .base = &(reqptr)->path.base[(reqptr)->query_at], \
+ .len = (reqptr)->path.len - (reqptr)->query_at }
+
+#define PRINTF_H2O_IOVEC_FMT "%.*s"
+#define PRINTF_H2O_IOVEC(iovec) ((int)(iovec)->len), ((iovec)->base)
+
+char *iovec_to_cstr(h2o_iovec_t *str);
+
+typedef struct h2o_iovec_pair {
+ h2o_iovec_t name;
+ h2o_iovec_t value;
+} h2o_iovec_pair_t;
+
+typedef H2O_VECTOR(h2o_iovec_pair_t) h2o_iovec_pair_vector_t;
+
+// Takes the part of the URL behind the '?' (the URL-encoded parameters)
+// and parses it into a vector of name/value pairs without copying the actual strings
+h2o_iovec_pair_vector_t *parse_URL_params(h2o_mem_pool_t *pool, h2o_iovec_t params_string);
+
+// Searches for a parameter by name (provided in needle);
+// returns a pointer to it or NULL if not found
+h2o_iovec_pair_t *get_URL_param_by_name(h2o_iovec_pair_vector_t *params_vec, const void *needle, size_t needle_len);
+
+char *url_unescape(const char *url);
+
+#endif /* NETDATA_H2O_UTILS_H */
diff --git a/src/web/server/h2o/http_server.c b/src/web/server/h2o/http_server.c
new file mode 100644
index 000000000..a079c6afe
--- /dev/null
+++ b/src/web/server/h2o/http_server.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "daemon/common.h"
+#include "streaming/common.h"
+#include "http_server.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wtype-limits"
+#include "h2o.h"
+#include "h2o/http1.h"
+#pragma GCC diagnostic pop
+
+#include "streaming.h"
+#include "h2o_utils.h"
+
+static h2o_globalconf_t config;
+static h2o_context_t ctx;
+static h2o_accept_ctx_t accept_ctx;
+
+#define CONTENT_JSON_UTF8 H2O_STRLIT("application/json; charset=utf-8")
+#define CONTENT_TEXT_UTF8 H2O_STRLIT("text/plain; charset=utf-8")
+#define NBUF_INITIAL_SIZE_RESP (4096)
+#define API_V1_PREFIX "/api/v1/"
+#define API_V2_PREFIX "/api/v2/"
+#define HOST_SELECT_PREFIX "/host/"
+
+#define HTTPD_CONFIG_SECTION "httpd"
+#define HTTPD_ENABLED_DEFAULT false
+
+static void on_accept(h2o_socket_t *listener, const char *err)
+{
+ h2o_socket_t *sock;
+
+ if (err != NULL) {
+ return;
+ }
+
+ if ((sock = h2o_evloop_socket_accept(listener)) == NULL)
+ return;
+ h2o_accept(&accept_ctx, sock);
+}
+
+static int create_listener(const char *ip, int port)
+{
+ struct sockaddr_in addr;
+ int fd, reuseaddr_flag = 1;
+ h2o_socket_t *sock;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = inet_addr(ip);
+ addr.sin_port = htons(port);
+
+ if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1 ||
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr_flag, sizeof(reuseaddr_flag)) != 0 ||
+ bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0 || listen(fd, SOMAXCONN) != 0) {
+ if (fd != -1)
+ close(fd);
+ return -1;
+ }
+
+ sock = h2o_evloop_socket_create(ctx.loop, fd, H2O_SOCKET_FLAG_DONT_READ);
+ h2o_socket_read_start(sock, on_accept);
+
+ return 0;
+}
+
+static int ssl_init()
+{
+ if (!config_get_boolean(HTTPD_CONFIG_SECTION, "ssl", false))
+ return 0;
+
+ char default_fn[FILENAME_MAX + 1];
+
+ snprintfz(default_fn, FILENAME_MAX, "%s/ssl/key.pem", netdata_configured_user_config_dir);
+ const char *key_fn = config_get(HTTPD_CONFIG_SECTION, "ssl key", default_fn);
+
+ snprintfz(default_fn, FILENAME_MAX, "%s/ssl/cert.pem", netdata_configured_user_config_dir);
+ const char *cert_fn = config_get(HTTPD_CONFIG_SECTION, "ssl certificate", default_fn);
+
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ accept_ctx.ssl_ctx = SSL_CTX_new(SSLv23_server_method());
+#else
+ accept_ctx.ssl_ctx = SSL_CTX_new(TLS_server_method());
+#endif
+ if (!accept_ctx.ssl_ctx) {
+ netdata_log_error("Could not allocate a new SSL_CTX");
+ return -1;
+ }
+
+ SSL_CTX_set_options(accept_ctx.ssl_ctx, SSL_OP_NO_SSLv2);
+
+ /* load certificate and private key */
+ if (SSL_CTX_use_PrivateKey_file(accept_ctx.ssl_ctx, key_fn, SSL_FILETYPE_PEM) != 1) {
+ netdata_log_error("Could not load server key from \"%s\"", key_fn);
+ return -1;
+ }
+ if (SSL_CTX_use_certificate_file(accept_ctx.ssl_ctx, cert_fn, SSL_FILETYPE_PEM) != 1) {
+ netdata_log_error("Could not load certificate from \"%s\"", cert_fn);
+ return -1;
+ }
+
+ h2o_ssl_register_alpn_protocols(accept_ctx.ssl_ctx, h2o_http2_alpn_protocols);
+
+ netdata_log_info("SSL support enabled");
+
+ return 0;
+}
+
+// I did not find a way to do wildcard paths to make a common handler for URLs like:
+// /api/v1/info
+// /host/child/api/v1/info
+// /host/uuid/api/v1/info
+// ideally we could do something like a "/*/api/v1/info" subscription,
+// so we do it "manually" here with the uberhandler
+static inline int _netdata_uberhandler(h2o_req_t *req, RRDHOST **host)
+{
+ if (!h2o_memis(req->method.base, req->method.len, H2O_STRLIT("GET")))
+ return -1;
+
+ static h2o_generator_t generator = { NULL, NULL };
+
+ h2o_iovec_t norm_path = req->path_normalized;
+
+ if (norm_path.len > strlen(HOST_SELECT_PREFIX) && !memcmp(norm_path.base, HOST_SELECT_PREFIX, strlen(HOST_SELECT_PREFIX))) {
+        h2o_iovec_t host_id; // host_id can be either a UUID or a hostname of the child
+
+ norm_path.base += strlen(HOST_SELECT_PREFIX);
+ norm_path.len -= strlen(HOST_SELECT_PREFIX);
+
+ host_id = norm_path;
+
+ size_t end_loc = h2o_strstr(host_id.base, host_id.len, "/", 1);
+ if (end_loc != SIZE_MAX) {
+ host_id.len = end_loc;
+ norm_path.base += end_loc;
+ norm_path.len -= end_loc;
+ }
+
+ char *c_host_id = iovec_to_cstr(&host_id);
+ *host = rrdhost_find_by_hostname(c_host_id);
+ if (!*host)
+ *host = rrdhost_find_by_guid(c_host_id);
+ if (!*host)
+ *host = find_host_by_node_id(c_host_id);
+ if (!*host) {
+ req->res.status = HTTP_RESP_BAD_REQUEST;
+ req->res.reason = "Wrong host id";
+ h2o_send_inline(req, H2O_STRLIT("Host id provided was not found!\n"));
+ freez(c_host_id);
+ return 0;
+ }
+ freez(c_host_id);
+
+        // we have to rewrite the URL here in case this is not an API call,
+        // so that the subsequent file handler can send the correct
+        // files to the client.
+        // If this is not an API call we will abort this handler later
+        // and let the internal file-serving handler of h2o take care of things.
+
+ if (end_loc == SIZE_MAX) {
+ req->path.len = 1;
+ req->path_normalized.len = 1;
+ } else {
+ size_t offset = norm_path.base - req->path_normalized.base;
+ req->path.len -= offset;
+ req->path.base += offset;
+ req->query_at -= offset;
+ req->path_normalized.len -= offset;
+ req->path_normalized.base += offset;
+ }
+ }
+
+    // workaround for a dashboard bug which sometimes causes URLs like
+    // "//api/v1/info" to be called instead of "/api/v1/info"
+ if (norm_path.len > 2 &&
+ norm_path.base[0] == '/' &&
+ norm_path.base[1] == '/' ) {
+ norm_path.base++;
+ norm_path.len--;
+ }
+
+ unsigned int api_version = 2;
+ size_t api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V2_PREFIX));
+ if (api_loc == SIZE_MAX) {
+ api_version = 1;
+ api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V1_PREFIX));
+ if (api_loc == SIZE_MAX)
+ return 1;
+ }
+
+    // API_V1_PREFIX and API_V2_PREFIX are the same length,
+    // but this is done in case someone changes the length of a prefix in the future,
+    // so that this code does not break silently.
+    // Until then the compiler will optimize this out.
+ size_t api_len = api_version == 1 ? strlen(API_V1_PREFIX) : strlen(API_V2_PREFIX);
+
+ h2o_iovec_t api_command = norm_path;
+ api_command.base += api_loc + api_len;
+ api_command.len -= api_loc + api_len;
+
+ if (!api_command.len)
+ return 1;
+
+    // TODO - get a web_client from the cache.
+    // This (emulating struct web_client) is a hack and will be removed
+    // in future PRs, but that needs bigger changes in the old http_api_v1.
+    // We need to make web_client_api_request_v1 web server
+    // agnostic and remove the old web servers' dependency creep into the
+    // individual response generators, and thus remove the need to "emulate"
+    // the old web server when calling this function here and in ACLK.
+ struct web_client w;
+ memset(&w, 0, sizeof(w));
+ w.response.data = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.response.header = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.url_query_string_decoded = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.url_as_received = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.port_acl = HTTP_ACL_H2O | HTTP_ACL_ALL_FEATURES;
+ w.acl = w.port_acl; // TODO - web_client_update_acl_matches(w) to restrict this based on user configuration
+
+ char *path_c_str = iovec_to_cstr(&api_command);
+ char *path_unescaped = url_unescape(path_c_str);
+ buffer_strcat(w.url_as_received, iovec_to_cstr(&norm_path));
+ freez(path_c_str);
+
+ IF_HAS_URL_PARAMS(req) {
+ h2o_iovec_t query_params = URL_PARAMS_IOVEC_INIT_WITH_QUESTIONMARK(req);
+ char *query_c_str = iovec_to_cstr(&query_params);
+ char *query_unescaped = url_unescape(query_c_str);
+ freez(query_c_str);
+ buffer_strcat(w.url_query_string_decoded, query_unescaped);
+ freez(query_unescaped);
+ }
+
+ if (api_version == 2)
+ web_client_api_request_v2(*host, &w, path_unescaped);
+ else
+ web_client_api_request_v1(*host, &w, path_unescaped);
+ freez(path_unescaped);
+
+    // we move the message body to req->pool managed memory, as it has to
+    // live until the whole response has been encrypted and sent;
+    // when the req is finished the memory will be freed with the pool
+ h2o_iovec_t body;
+ {
+ BUFFER *wb = w.response.data;
+ body.base = wb->buffer;
+ body.len = wb->len;
+
+ void *managed = h2o_mem_alloc_shared(&req->pool, body.len, NULL);
+ memcpy(managed, body.base, body.len);
+ body.base = managed;
+ }
+
+ req->res.status = HTTP_RESP_OK;
+ req->res.reason = "OK";
+ if (w.response.data->content_type == CT_APPLICATION_JSON)
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, CONTENT_JSON_UTF8);
+ else
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, CONTENT_TEXT_UTF8);
+ h2o_start_response(req, &generator);
+ h2o_send(req, &body, 1, H2O_SEND_STATE_FINAL);
+
+ buffer_free(w.response.data);
+ buffer_free(w.response.header);
+ buffer_free(w.url_query_string_decoded);
+ buffer_free(w.url_as_received);
+
+ return 0;
+}
+
+static int netdata_uberhandler(h2o_handler_t *self, h2o_req_t *req)
+{
+ UNUSED(self);
+ RRDHOST *host = localhost;
+
+ int ret = _netdata_uberhandler(req, &host);
+
+ if (!ret) {
+ char host_uuid_str[UUID_STR_LEN];
+
+ if (host != NULL)
+ uuid_unparse_lower(host->host_uuid, host_uuid_str);
+
+ nd_log(NDLS_ACCESS, NDLP_DEBUG, "HTTPD OK method: " PRINTF_H2O_IOVEC_FMT
+ ", path: " PRINTF_H2O_IOVEC_FMT
+ ", as host: %s"
+ ", response: %d",
+ PRINTF_H2O_IOVEC(&req->method),
+ PRINTF_H2O_IOVEC(&req->input.path),
+               host == NULL ? "unknown" : (host == localhost ? "localhost" : host_uuid_str),
+ req->res.status);
+ } else {
+ nd_log(NDLS_ACCESS, NDLP_DEBUG, "HTTPD %d"
+ " method: " PRINTF_H2O_IOVEC_FMT
+ ", path: " PRINTF_H2O_IOVEC_FMT
+ ", forwarding to file handler as path: " PRINTF_H2O_IOVEC_FMT,
+ ret,
+ PRINTF_H2O_IOVEC(&req->method),
+ PRINTF_H2O_IOVEC(&req->input.path),
+ PRINTF_H2O_IOVEC(&req->path));
+ }
+
+ return ret;
+}
+
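+// GET handler that serves the currently effective netdata.conf as plain text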
+static int hdl_netdata_conf(h2o_handler_t *self, h2o_req_t *req)
+{
+ UNUSED(self);
+ if (!h2o_memis(req->method.base, req->method.len, H2O_STRLIT("GET")))
+ return -1;
+
+ BUFFER *buf = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ config_generate(buf, 0);
+
+ void *managed = h2o_mem_alloc_shared(&req->pool, buf->len, NULL);
+ memcpy(managed, buf->buffer, buf->len);
+
+ req->res.status = HTTP_RESP_OK;
+ req->res.reason = "OK";
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, CONTENT_TEXT_UTF8);
+ h2o_send_inline(req, managed, buf->len);
+ buffer_free(buf);
+
+ return 0;
+}
+
+static int hdl_stream(h2o_handler_t *self, h2o_req_t *req)
+{
+ UNUSED(self);
+    netdata_log_info("Streaming request through h2o received");
+ h2o_stream_conn_t *conn = mallocz(sizeof(*conn));
+ h2o_stream_conn_t_init(conn);
+
+ if (is_streaming_handshake(req)) {
+ h2o_stream_conn_t_destroy(conn);
+ freez(conn);
+ return 1;
+ }
+
+ /* build response */
+ req->res.status = HTTP_RESP_SWITCH_PROTO;
+ req->res.reason = "Switching Protocols";
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_UPGRADE, NULL, H2O_STRLIT(NETDATA_STREAM_PROTO_NAME));
+
+// TODO we should consider adding some nonce header here
+// h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("whatever reply"), 0, NULL, accept_key,
+// strlen(accept_key));
+
+ h2o_http1_upgrade(req, NULL, 0, stream_on_complete, conn);
+
+ return 0;
+}
+
+#define POLL_INTERVAL 100
+
+void *h2o_main(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+
+ h2o_pathconf_t *pathconf;
+ h2o_hostconf_t *hostconf;
+
+ const char *bind_addr = config_get(HTTPD_CONFIG_SECTION, "bind to", "127.0.0.1");
+ int bind_port = config_get_number(HTTPD_CONFIG_SECTION, "port", 19998);
+
+ h2o_config_init(&config);
+ hostconf = h2o_config_register_host(&config, h2o_iovec_init(H2O_STRLIT("default")), bind_port);
+
+ pathconf = h2o_config_register_path(hostconf, "/netdata.conf", 0);
+ h2o_handler_t *handler = h2o_create_handler(pathconf, sizeof(*handler));
+ handler->on_req = hdl_netdata_conf;
+
+ pathconf = h2o_config_register_path(hostconf, NETDATA_STREAM_URL, 0);
+ handler = h2o_create_handler(pathconf, sizeof(*handler));
+ handler->on_req = hdl_stream;
+
+ pathconf = h2o_config_register_path(hostconf, "/", 0);
+ handler = h2o_create_handler(pathconf, sizeof(*handler));
+ handler->on_req = netdata_uberhandler;
+ h2o_file_register(pathconf, netdata_configured_web_dir, NULL, NULL, H2O_FILE_FLAG_SEND_COMPRESSED);
+
+ h2o_context_init(&ctx, h2o_evloop_create(), &config);
+
+ if(ssl_init()) {
+ error_report("SSL was requested but could not be properly initialized. Aborting.");
+ return NULL;
+ }
+
+ accept_ctx.ctx = &ctx;
+ accept_ctx.hosts = config.hosts;
+
+ if (create_listener(bind_addr, bind_port) != 0) {
+ netdata_log_error("failed to create listener %s:%d", bind_addr, bind_port);
+ return NULL;
+ }
+
+ usec_t last_wpoll = now_monotonic_usec();
+ while (service_running(SERVICE_HTTPD)) {
+ int rc = h2o_evloop_run(ctx.loop, POLL_INTERVAL);
+ if (rc < 0 && errno != EINTR) {
+ netdata_log_error("h2o_evloop_run returned (%d) with errno other than EINTR. Aborting", rc);
+ break;
+ }
+ usec_t now = now_monotonic_usec();
+ if (now - last_wpoll > POLL_INTERVAL * USEC_PER_MS) {
+ last_wpoll = now;
+
+ h2o_stream_check_pending_write_reqs();
+ }
+ }
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return NULL;
+}
+
+int httpd_is_enabled() {
+ return config_get_boolean(HTTPD_CONFIG_SECTION, "enabled", HTTPD_ENABLED_DEFAULT);
+}
diff --git a/src/web/server/h2o/http_server.h b/src/web/server/h2o/http_server.h
new file mode 100644
index 000000000..28d1c560a
--- /dev/null
+++ b/src/web/server/h2o/http_server.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef HTTP_SERVER_H
+#define HTTP_SERVER_H
+
+#include <stddef.h>
+
+void *h2o_main(void * ptr);
+
+int h2o_stream_write(void *ctx, const char *data, size_t data_len);
+size_t h2o_stream_read(void *ctx, char *buf, size_t read_bytes);
+
+int httpd_is_enabled();
+
+#endif /* HTTP_SERVER_H */
diff --git a/src/web/server/h2o/streaming.c b/src/web/server/h2o/streaming.c
new file mode 100644
index 000000000..fbe3f8050
--- /dev/null
+++ b/src/web/server/h2o/streaming.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "daemon/common.h"
+#include "streaming.h"
+#include "connlist.h"
+#include "h2o_utils.h"
+#include "streaming/common.h"
+
+static int pending_write_reqs = 0;
+
+#define H2O2STREAM_BUF_SIZE (1024 * 1024)
+
+// h2o_stream_conn_t related functions
+void h2o_stream_conn_t_init(h2o_stream_conn_t *conn)
+{
+ memset(conn, 0, sizeof(*conn));
+ conn->rx = rbuf_create(H2O2STREAM_BUF_SIZE);
+ conn->tx = rbuf_create(H2O2STREAM_BUF_SIZE);
+
+ pthread_mutex_init(&conn->rx_buf_lock, NULL);
+ pthread_mutex_init(&conn->tx_buf_lock, NULL);
+ pthread_cond_init(&conn->rx_buf_cond, NULL);
+ // no need to check for NULL as rbuf_create uses mallocz internally
+}
+
+void h2o_stream_conn_t_destroy(h2o_stream_conn_t *conn)
+{
+ rbuf_free(conn->rx);
+ rbuf_free(conn->tx);
+
+ freez(conn->url);
+ freez(conn->user_agent);
+
+ pthread_mutex_destroy(&conn->rx_buf_lock);
+ pthread_mutex_destroy(&conn->tx_buf_lock);
+ pthread_cond_destroy(&conn->rx_buf_cond);
+}
+
+// streaming upgrade related functions
+int is_streaming_handshake(h2o_req_t *req)
+{
+ /* method */
+ if (!h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("GET")))
+ return 1;
+
+ if (!h2o_memis(req->path_normalized.base, req->path_normalized.len, H2O_STRLIT(NETDATA_STREAM_URL))) {
+ return 1;
+ }
+
+ /* upgrade header */
+ if (req->upgrade.base == NULL || !h2o_lcstris(req->upgrade.base, req->upgrade.len, H2O_STRLIT(NETDATA_STREAM_PROTO_NAME)))
+ return 1;
+
+ // TODO consider adding some key in form of random number
+ // to prevent caching on route especially if TLS is not used
+ // e.g. client sends random number
+ // server replies with it xored
+
+ return 0;
+}
+
+static void stream_on_close(h2o_stream_conn_t *conn);
+void stream_process(h2o_stream_conn_t *conn, int initial);
+
+void stream_on_complete(void *user_data, h2o_socket_t *sock, size_t reqsize)
+{
+ h2o_stream_conn_t *conn = user_data;
+
+ /* close the connection on error */
+ if (sock == NULL) {
+ stream_on_close(conn);
+ return;
+ }
+
+ conn->sock = sock;
+ sock->data = conn;
+
+ conn_list_insert(&conn_list, conn);
+
+ h2o_buffer_consume(&sock->input, reqsize);
+ stream_process(conn, 1);
+}
+
+// handling of active streams
+static void stream_on_close(h2o_stream_conn_t *conn)
+{
+ if (conn->sock != NULL)
+ h2o_socket_close(conn->sock);
+
+ conn_list_remove_conn(&conn_list, conn);
+
+ pthread_mutex_lock(&conn->rx_buf_lock);
+ conn->shutdown = 1;
+ pthread_cond_broadcast(&conn->rx_buf_cond);
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+
+ h2o_stream_conn_t_destroy(conn);
+ freez(conn);
+}
+
+static void on_write_complete(h2o_socket_t *sock, const char *err)
+{
+ h2o_stream_conn_t *conn = sock->data;
+
+ if (err != NULL) {
+ stream_on_close(conn);
+ error_report("Streaming connection error \"%s\"", err);
+ return;
+ }
+
+ pthread_mutex_lock(&conn->tx_buf_lock);
+
+ rbuf_bump_tail(conn->tx, conn->tx_buf.len);
+
+ conn->tx_buf.base = NULL;
+ conn->tx_buf.len = 0;
+
+ pthread_mutex_unlock(&conn->tx_buf_lock);
+
+ stream_process(conn, 0);
+}
+
+static void stream_on_recv(h2o_socket_t *sock, const char *err)
+{
+ h2o_stream_conn_t *conn = sock->data;
+
+ if (err != NULL) {
+ stream_on_close(conn);
+ error_report("Streaming connection error \"%s\"", err);
+ return;
+ }
+ stream_process(conn, 0);
+}
+
+#define PARSE_DONE 1
+#define PARSE_ERROR -1
+#define GIMME_MORE_OF_DEM_SWEET_BYTEZ 0
+
+#define STREAM_METHOD "STREAM "
+#define USER_AGENT "User-Agent: "
+
+#define NEED_MIN_BYTES(buf, bytes) do { \
+ if(rbuf_bytes_available(buf) < bytes) \
+ return GIMME_MORE_OF_DEM_SWEET_BYTEZ;\
+} while(0)
+
+// TODO check in streaming code this is probably defined somewhere already
+#define MAX_LEN_STREAM_HELLO (1024*2)
+
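+// Incrementally parses the child's "STREAM [url] HTTP/1.1" hello from the ring buffer,
+// extracting the URL and the User-Agent header. Returns PARSE_DONE, PARSE_ERROR, or
+// GIMME_MORE_OF_DEM_SWEET_BYTEZ when more data is needed.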
+static int process_STREAM_X_HTTP_1_1(http_stream_parse_state_t *parser_state, rbuf_t buf, char **url, char **user_agent)
+{
+ int idx;
+ switch(*parser_state) {
+ case HTTP_STREAM:
+ NEED_MIN_BYTES(buf, strlen(STREAM_METHOD));
+ if (rbuf_memcmp_n(buf, H2O_STRLIT(STREAM_METHOD))) {
+ error_report("Expected \"%s\"", STREAM_METHOD);
+ return PARSE_ERROR;
+ }
+ rbuf_bump_tail(buf, strlen(STREAM_METHOD));
+ *parser_state = HTTP_URL;
+ /* FALLTHROUGH */
+ case HTTP_URL:
+ if (!rbuf_find_bytes(buf, " ", 1, &idx)) {
+            if (rbuf_bytes_available(buf) >= MAX_LEN_STREAM_HELLO) {
+                error_report("The initial \"STREAM [URL]" HTTP_1_1 "\" over max of %d", MAX_LEN_STREAM_HELLO);
+                return PARSE_ERROR;
+            }
+            return GIMME_MORE_OF_DEM_SWEET_BYTEZ;
+        }
+ *url = mallocz(idx + 1);
+ rbuf_pop(buf, *url, idx);
+ (*url)[idx] = 0;
+
+ *parser_state = HTTP_PROTO;
+ /* FALLTHROUGH */
+ case HTTP_PROTO:
+ NEED_MIN_BYTES(buf, strlen(HTTP_1_1));
+ if (rbuf_memcmp_n(buf, H2O_STRLIT(HTTP_1_1))) {
+ error_report("Expected \"%s\"", HTTP_1_1);
+ return PARSE_ERROR;
+ }
+ rbuf_bump_tail(buf, strlen(HTTP_1_1));
+ *parser_state = HTTP_USER_AGENT_KEY;
+ /* FALLTHROUGH */
+ case HTTP_USER_AGENT_KEY:
+        // and OF COURSE everything is passed in the URL, except
+        // for the user agent, which we need and which is passed as an HTTP header.
+        // It is not worth writing a parser for this, so we manually extract
+        // just the single header we need and skip everything else.
+ if (!rbuf_find_bytes(buf, USER_AGENT, strlen(USER_AGENT), &idx)) {
+ if (rbuf_bytes_available(buf) >= (size_t)(rbuf_get_capacity(buf) * 0.9)) {
+ error_report("The initial \"STREAM [URL]" HTTP_1_1 "\" over max of %d", MAX_LEN_STREAM_HELLO);
+ return PARSE_ERROR;
+ }
+ return GIMME_MORE_OF_DEM_SWEET_BYTEZ;
+ }
+ rbuf_bump_tail(buf, idx + strlen(USER_AGENT));
+ *parser_state = HTTP_USER_AGENT_VALUE;
+ /* FALLTHROUGH */
+ case HTTP_USER_AGENT_VALUE:
+ if (!rbuf_find_bytes(buf, "\r\n", 2, &idx)) {
+ if (rbuf_bytes_available(buf) >= (size_t)(rbuf_get_capacity(buf) * 0.9)) {
+ error_report("The initial \"STREAM [URL]" HTTP_1_1 "\" over max of %d", MAX_LEN_STREAM_HELLO);
+ return PARSE_ERROR;
+ }
+ return GIMME_MORE_OF_DEM_SWEET_BYTEZ;
+ }
+
+ *user_agent = mallocz(idx + 1);
+ rbuf_pop(buf, *user_agent, idx);
+ (*user_agent)[idx] = 0;
+
+ *parser_state = HTTP_HDR;
+ /* FALLTHROUGH */
+ case HTTP_HDR:
+ if (!rbuf_find_bytes(buf, HTTP_HDR_END, strlen(HTTP_HDR_END), &idx)) {
+ if (rbuf_bytes_available(buf) >= (size_t)(rbuf_get_capacity(buf) * 0.9)) {
+ error_report("The initial \"STREAM [URL]" HTTP_1_1 "\" over max of %d", MAX_LEN_STREAM_HELLO);
+ return PARSE_ERROR;
+ }
+ return GIMME_MORE_OF_DEM_SWEET_BYTEZ;
+ }
+ rbuf_bump_tail(buf, idx + strlen(HTTP_HDR_END));
+
+ *parser_state = HTTP_DONE;
+ return PARSE_DONE;
+ case HTTP_DONE:
+ error_report("Parsing is done. No need to call again.");
+ return PARSE_DONE;
+ default:
+ error_report("Unknown parser state %d", (int)*parser_state);
+ return PARSE_ERROR;
+ }
+}
+
+#define SINGLE_WRITE_MAX (1024)
+
+void stream_process(h2o_stream_conn_t *conn, int initial)
+{
+ int rc;
+ struct web_client w;
+
+ pthread_mutex_lock(&conn->tx_buf_lock);
+ if (h2o_socket_is_writing(conn->sock) || rbuf_bytes_available(conn->tx)) {
+ if (rbuf_bytes_available(conn->tx) && !conn->tx_buf.base) {
+ conn->tx_buf.base = rbuf_get_linear_read_range(conn->tx, &conn->tx_buf.len);
+ if (conn->tx_buf.base) {
+ conn->tx_buf.len = MIN(conn->tx_buf.len, SINGLE_WRITE_MAX);
+ h2o_socket_write(conn->sock, &conn->tx_buf, 1, on_write_complete);
+ }
+ }
+ }
+ pthread_mutex_unlock(&conn->tx_buf_lock);
+
+ if (initial)
+ h2o_socket_read_start(conn->sock, stream_on_recv);
+
+ if (conn->sock->input->size) {
+ size_t insert_max;
+ pthread_mutex_lock(&conn->rx_buf_lock);
+ char *insert_loc = rbuf_get_linear_insert_range(conn->rx, &insert_max);
+ if (insert_loc == NULL) {
+ pthread_cond_broadcast(&conn->rx_buf_cond);
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+ return;
+ }
+ insert_max = MIN(insert_max, conn->sock->input->size);
+ memcpy(insert_loc, conn->sock->input->bytes, insert_max);
+ rbuf_bump_head(conn->rx, insert_max);
+
+ h2o_buffer_consume(&conn->sock->input, insert_max);
+
+ pthread_cond_broadcast(&conn->rx_buf_cond);
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+ }
+
+ switch (conn->state) {
+ case STREAM_X_HTTP_1_1:
+        // no conn->rx lock here, as at this point we are still single threaded
+        // until we call rrdpush_receiver_thread_spawn() further down
+ rc = process_STREAM_X_HTTP_1_1(&conn->parse_state, conn->rx, &conn->url, &conn->user_agent);
+ if (rc == PARSE_ERROR) {
+ error_report("error parsing the STREAM hello");
+ break;
+ }
+ if (rc != PARSE_DONE)
+ break;
+ conn->state = STREAM_X_HTTP_1_1_DONE;
+ /* FALLTHROUGH */
+ case STREAM_X_HTTP_1_1_DONE:
+ memset(&w, 0, sizeof(w));
+ w.response.data = buffer_create(1024, NULL);
+
+ // get client ip from the conn->sock
+ struct sockaddr client;
+ socklen_t len = h2o_socket_getpeername(conn->sock, &client);
+ char peername[NI_MAXHOST];
+ size_t peername_len = h2o_socket_getnumerichost(&client, len, peername);
+ size_t cpy_len = sizeof(w.client_ip) < peername_len ? sizeof(w.client_ip) : peername_len;
+ memcpy(w.client_ip, peername, cpy_len);
+ w.client_ip[cpy_len - 1] = 0;
+ w.user_agent = conn->user_agent;
+
+ rc = rrdpush_receiver_thread_spawn(&w, conn->url, conn);
+ if (rc != HTTP_RESP_OK) {
+ error_report("HTTPD Failed to spawn the receiver thread %d", rc);
+ conn->state = STREAM_CLOSE;
+ stream_on_close(conn);
+ } else {
+ conn->state = STREAM_ACTIVE;
+ }
+ buffer_free(w.response.data);
+ /* FALLTHROUGH */
+ case STREAM_ACTIVE:
+ break;
+ default:
+ error_report("Unknown conn->state");
+ }
+}
+
+// read and write functions to be used by streaming parser
+int h2o_stream_write(void *ctx, const char *data, size_t data_len)
+{
+ h2o_stream_conn_t *conn = (h2o_stream_conn_t *)ctx;
+
+ pthread_mutex_lock(&conn->tx_buf_lock);
+ size_t avail = rbuf_bytes_free(conn->tx);
+ avail = MIN(avail, data_len);
+ rbuf_push(conn->tx, data, avail);
+ pthread_mutex_unlock(&conn->tx_buf_lock);
+ __atomic_add_fetch(&pending_write_reqs, 1, __ATOMIC_SEQ_CST);
+ return avail;
+}
+
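+// Blocks on rx_buf_cond until data arrives in the rx ring buffer (or the connection
+// shuts down); returns the number of bytes copied, or -1 (cast to size_t) on shutdown.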
+size_t h2o_stream_read(void *ctx, char *buf, size_t read_bytes)
+{
+ int ret;
+ h2o_stream_conn_t *conn = (h2o_stream_conn_t *)ctx;
+
+ pthread_mutex_lock(&conn->rx_buf_lock);
+ size_t avail = rbuf_bytes_available(conn->rx);
+
+ if (!avail) {
+ if (conn->shutdown) {
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+ return -1;
+ }
+ pthread_cond_wait(&conn->rx_buf_cond, &conn->rx_buf_lock);
+ if (conn->shutdown) {
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+ return -1;
+ }
+ avail = rbuf_bytes_available(conn->rx);
+ if (!avail) {
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+ return 0;
+ }
+ }
+
+ avail = MIN(avail, read_bytes);
+
+ ret = rbuf_pop(conn->rx, buf, avail);
+ pthread_mutex_unlock(&conn->rx_buf_lock);
+
+ return ret;
+}
+
+// periodic check for pending write requests
+void check_tx_buf(h2o_stream_conn_t *conn)
+{
+ pthread_mutex_lock(&conn->tx_buf_lock);
+ if (rbuf_bytes_available(conn->tx)) {
+ pthread_mutex_unlock(&conn->tx_buf_lock);
+ stream_process(conn, 0);
+ } else
+ pthread_mutex_unlock(&conn->tx_buf_lock);
+}
+
+void h2o_stream_check_pending_write_reqs(void)
+{
+ int _write_reqs = __atomic_exchange_n(&pending_write_reqs, 0, __ATOMIC_SEQ_CST);
+ if (_write_reqs > 0)
+ conn_list_iter_all(&conn_list, check_tx_buf);
+}
diff --git a/src/web/server/h2o/streaming.h b/src/web/server/h2o/streaming.h
new file mode 100644
index 000000000..dfc7b68fc
--- /dev/null
+++ b/src/web/server/h2o/streaming.h
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef HTTPD_STREAMING_H
+#define HTTPD_STREAMING_H
+
+#include "aclk/mqtt_websockets/c-rbuf/cringbuffer.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wtype-limits"
+#include "h2o.h"
+#pragma GCC diagnostic pop
+
+typedef enum {
+ STREAM_X_HTTP_1_1 = 0,
+ STREAM_X_HTTP_1_1_DONE,
+ STREAM_ACTIVE,
+ STREAM_CLOSE
+} h2o_stream_state_t;
+
+typedef enum {
+ HTTP_STREAM = 0,
+ HTTP_URL,
+ HTTP_PROTO,
+ HTTP_USER_AGENT_KEY,
+ HTTP_USER_AGENT_VALUE,
+ HTTP_HDR,
+ HTTP_DONE
+} http_stream_parse_state_t;
+
+typedef struct {
+ h2o_socket_t *sock;
+ h2o_stream_state_t state;
+
+ rbuf_t rx;
+ pthread_cond_t rx_buf_cond;
+ pthread_mutex_t rx_buf_lock;
+
+ rbuf_t tx;
+ h2o_iovec_t tx_buf;
+ pthread_mutex_t tx_buf_lock;
+
+ http_stream_parse_state_t parse_state;
+ char *url;
+ char *user_agent;
+
+ int shutdown;
+} h2o_stream_conn_t;
+
+// h2o_stream_conn_t related functions
+void h2o_stream_conn_t_init(h2o_stream_conn_t *conn);
+void h2o_stream_conn_t_destroy(h2o_stream_conn_t *conn);
+
+// streaming upgrade related functions
+int is_streaming_handshake(h2o_req_t *req);
+void stream_on_complete(void *user_data, h2o_socket_t *sock, size_t reqsize);
+
+// read and write functions to be used by streaming parser
+int h2o_stream_write(void *ctx, const char *data, size_t data_len);
+size_t h2o_stream_read(void *ctx, char *buf, size_t read_bytes);
+
+// call this periodically to check if there are any pending write requests
+void h2o_stream_check_pending_write_reqs(void);
+
+#endif /* HTTPD_STREAMING_H */
diff --git a/src/web/server/static/README.md b/src/web/server/static/README.md
new file mode 100644
index 000000000..e67949e21
--- /dev/null
+++ b/src/web/server/static/README.md
@@ -0,0 +1,21 @@
+<!--
+title: "`static-threaded` web server"
+description: "The Netdata Agent's static-threaded web server spawns a fixed number of threads that listen to web requests and uses non-blocking I/O."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/server/static/README.md
+sidebar_label: "`static-threaded` web server"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/Web"
+-->
+
+# `static-threaded` web server
+
+The `static-threaded` web server spawns a fixed number of threads.
+All the threads are concurrently listening for web requests on the same sockets.
+The kernel distributes the incoming requests to them.
+
+Each thread uses non-blocking I/O so it can serve any number of web requests in parallel.
+
+This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
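+
+As an illustration of this pattern, here is a minimal, hypothetical sketch (not the Netdata
+implementation; the port, worker count and response are placeholders): several threads share a
+single non-blocking listening socket, each one polls it, and the kernel hands every incoming
+connection to whichever thread accepts it first.
+
+```c
+// Simplified sketch of the static-threaded pattern (POSIX, not Netdata code):
+// N worker threads poll() and accept() on the same non-blocking listening socket.
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <poll.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#define WORKERS 4
+
+static void *worker(void *arg) {
+    int listen_fd = *(int *)arg;
+    struct pollfd pfd = { .fd = listen_fd, .events = POLLIN };
+
+    for (;;) {
+        if (poll(&pfd, 1, 1000) <= 0)
+            continue;                                // timeout or error, poll again
+
+        int client = accept(listen_fd, NULL, NULL);  // the kernel gives the connection to one thread
+        if (client < 0)
+            continue;                                // another thread won the race
+
+        // a real server would switch `client` to non-blocking mode and parse HTTP here
+        const char resp[] = "HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n";
+        write(client, resp, sizeof(resp) - 1);
+        close(client);
+    }
+    return NULL;
+}
+
+int main(void) {
+    int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
+    int one = 1;
+    setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+    fcntl(listen_fd, F_SETFL, O_NONBLOCK);           // accept() must never block a worker
+
+    struct sockaddr_in addr = { .sin_family = AF_INET, .sin_port = htons(19999) };
+    addr.sin_addr.s_addr = htonl(INADDR_ANY);
+    if (bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 || listen(listen_fd, 128) < 0) {
+        perror("bind/listen");
+        return 1;
+    }
+
+    pthread_t threads[WORKERS];
+    for (int i = 0; i < WORKERS; i++)                // all workers share the same listening socket
+        pthread_create(&threads[i], NULL, worker, &listen_fd);
+    for (int i = 0; i < WORKERS; i++)
+        pthread_join(threads[i], NULL);
+    return 0;
+}
+```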
+
+
diff --git a/src/web/server/static/static-threaded.c b/src/web/server/static/static-threaded.c
new file mode 100644
index 000000000..a4b24c9ac
--- /dev/null
+++ b/src/web/server/static/static-threaded.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "static-threaded.h"
+
+int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS;
+int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST;
+long web_client_streaming_rate_t = 0L;
+
+#define WORKER_JOB_ADD_CONNECTION 0
+#define WORKER_JOB_DEL_COLLECTION 1
+#define WORKER_JOB_ADD_FILE 2
+#define WORKER_JOB_DEL_FILE 3
+#define WORKER_JOB_READ_FILE 4
+#define WORKER_JOB_WRITE_FILE 5
+#define WORKER_JOB_RCV_DATA 6
+#define WORKER_JOB_SND_DATA 7
+#define WORKER_JOB_PROCESS 8
+
+#if (WORKER_UTILIZATION_MAX_JOB_TYPES < 9)
+#error Please increase WORKER_UTILIZATION_MAX_JOB_TYPES to at least 9
+#endif
+
+/*
+ * --------------------------------------------------------------------------------------------------------------------
+ * Build web_client state from the pollinfo that describes an accepted connection.
+ */
+static struct web_client *web_client_create_on_fd(POLLINFO *pi) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache();
+ w->ifd = w->ofd = pi->fd;
+
+ strncpyz(w->client_ip, pi->client_ip, sizeof(w->client_ip) - 1);
+ strncpyz(w->client_port, pi->client_port, sizeof(w->client_port) - 1);
+ strncpyz(w->client_host, pi->client_host, sizeof(w->client_host) - 1);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+ w->port_acl = pi->port_acl;
+
+ int flag = 1;
+ if(unlikely(
+ web_client_check_conn_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0))
+ netdata_log_debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd);
+
+ flag = 1;
+ if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0))
+ netdata_log_debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd);
+
+ web_client_update_acl_matches(w);
+ web_client_enable_wait_receive(w);
+
+ web_server_log_connection(w, "CONNECTED");
+
+ w->pollinfo_slot = pi->slot;
+ return(w);
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - STATIC-THREADED
+
+struct web_server_static_threaded_worker {
+ ND_THREAD *thread;
+
+ int id;
+ int running;
+
+ size_t max_sockets;
+
+ volatile size_t connected;
+ volatile size_t disconnected;
+ volatile size_t receptions;
+ volatile size_t sends;
+ volatile size_t max_concurrent;
+
+ volatile size_t files_read;
+ volatile size_t file_reads;
+};
+
+static long long static_threaded_workers_count = 1;
+
+static struct web_server_static_threaded_worker *static_workers_private_data = NULL;
+static __thread struct web_server_static_threaded_worker *worker_private = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline int web_server_check_client_status(struct web_client *w) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
+ return -1;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// web server files
+
+static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) {
+ struct web_client *w = (struct web_client *)data;
+
+ worker_is_busy(WORKER_JOB_ADD_FILE);
+
+ worker_private->files_read++;
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd);
+ *events = POLLIN;
+ pi->data = w;
+
+ worker_is_idle();
+ return w;
+}
+
+static void web_server_file_del_callback(POLLINFO *pi) {
+ struct web_client *w = (struct web_client *)pi->data;
+ netdata_log_debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd);
+
+ worker_is_busy(WORKER_JOB_DEL_FILE);
+
+ w->pollinfo_filecopy_slot = 0;
+
+ if(unlikely(!w->pollinfo_slot)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd);
+ web_server_log_connection(w, "DISCONNECTED");
+ web_client_request_done(w);
+ web_client_release_to_cache(w);
+ global_statistics_web_client_disconnected();
+ }
+
+ worker_is_idle();
+}
+
+static int web_server_file_read_callback(POLLINFO *pi, short int *events) {
+ int retval = -1;
+ struct web_client *w = (struct web_client *)pi->data;
+
+ worker_is_busy(WORKER_JOB_READ_FILE);
+
+ // if there is no POLLINFO linked to this, it means the client disconnected
+ // stop the file reading too
+ if(unlikely(!w->pollinfo_slot)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd);
+ retval = -1;
+ goto cleanup;
+ }
+
+ if(unlikely(w->mode != HTTP_REQUEST_MODE_FILECOPY || w->ifd == w->ofd)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd);
+ retval = -1;
+ goto cleanup;
+ }
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd);
+
+ worker_private->file_reads++;
+    ssize_t ret = web_client_read_file(w);
+
+ if(likely(web_client_has_wait_send(w))) {
+ POLLJOB *p = pi->p; // our POLLJOB
+ POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd);
+ p->fds[wpi->slot].events |= POLLOUT;
+ }
+
+ if(unlikely(ret <= 0 || w->ifd == w->ofd)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd);
+ retval = -1;
+ goto cleanup;
+ }
+
+ *events = POLLIN;
+ retval = 0;
+
+cleanup:
+ worker_is_idle();
+ return retval;
+}
+
+static int web_server_file_write_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ worker_is_busy(WORKER_JOB_WRITE_FILE);
+ netdata_log_error("Writing to web files is not supported!");
+ worker_is_idle();
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// web server clients
+
+static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)data; // Suppress warning on unused argument
+
+ worker_is_busy(WORKER_JOB_ADD_CONNECTION);
+ worker_private->connected++;
+
+ size_t concurrent = worker_private->connected - worker_private->disconnected;
+ if(unlikely(concurrent > worker_private->max_concurrent))
+ worker_private->max_concurrent = concurrent;
+
+ *events = POLLIN;
+
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd);
+ struct web_client *w = web_client_create_on_fd(pi);
+
+ if (!strncmp(pi->client_port, "UNIX", 4)) {
+ web_client_set_conn_unix(w);
+ } else {
+ web_client_set_conn_tcp(w);
+ }
+
+#ifdef ENABLE_HTTPS
+ if ((web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx)) {
+ sock_delnonblock(w->ifd);
+
+        // Read the first 7 bytes of the message with MSG_PEEK, so the data
+        // is not removed from the socket's receive queue.
+ char test[8];
+ if ( recv(w->ifd,test, 7, MSG_PEEK) == 7 ) {
+ test[7] = '\0';
+ }
+ else {
+ // we couldn't read 7 bytes
+ sock_setnonblock(w->ifd);
+ goto cleanup;
+ }
+
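+        // A TLS ClientHello starts with the handshake record type 0x16 and all common
+        // TLS record types are <= 0x17, while a plain-text HTTP method starts with an
+        // ASCII letter (e.g. 'G' is 0x47) - so anything above 0x17 is treated as plain HTTP.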
+ if(test[0] > 0x17) {
+ // no SSL
+ netdata_ssl_close(&w->ssl); // free any previous SSL data
+ }
+ else {
+ // SSL
+ if(!netdata_ssl_open(&w->ssl, netdata_ssl_web_server_ctx, w->ifd) || !netdata_ssl_accept(&w->ssl))
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ sock_setnonblock(w->ifd);
+ }
+#endif
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd);
+
+#ifdef ENABLE_HTTPS
+cleanup:
+#endif
+ worker_is_idle();
+ return w;
+}
+
+// TCP client disconnected
+static void web_server_del_callback(POLLINFO *pi) {
+ worker_is_busy(WORKER_JOB_DEL_COLLECTION);
+
+ worker_private->disconnected++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+
+ w->pollinfo_slot = 0;
+ if(unlikely(w->pollinfo_filecopy_slot)) {
+ POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket
+ (void)fpi;
+
+        netdata_log_debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FREED BY THE FILE READING JOB ON FD %d", w->id, fpi->fd);
+ }
+ else {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET))
+ pi->flags |= POLLINFO_FLAG_DONT_CLOSE;
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd);
+ web_server_log_connection(w, "DISCONNECTED");
+ web_client_request_done(w);
+ web_client_release_to_cache(w);
+ global_statistics_web_client_disconnected();
+ }
+
+ worker_is_idle();
+}
+
+static int web_server_rcv_callback(POLLINFO *pi, short int *events) {
+ int ret = -1;
+ worker_is_busy(WORKER_JOB_RCV_DATA);
+
+ worker_private->receptions++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ ssize_t bytes;
+ bytes = web_client_receive(w);
+
+ if (likely(bytes > 0)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd);
+ worker_is_idle();
+ worker_is_busy(WORKER_JOB_PROCESS);
+ web_client_process_request_from_web_server(w);
+
+ if (unlikely(w->mode == HTTP_REQUEST_MODE_STREAM)) {
+ web_client_send(w);
+ }
+
+ else if(unlikely(w->mode == HTTP_REQUEST_MODE_FILECOPY)) {
+ if(w->pollinfo_filecopy_slot == 0) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd);
+
+ if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) {
+                    // add a new socket to poll_events, with the same port ACL, to read the file from
+ netdata_log_debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd);
+
+ POLLINFO *fpi = poll_add_fd(
+ pi->p
+ , w->ifd
+ , pi->port_acl
+ , 0
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , "FILENAME"
+ , ""
+ , ""
+ , web_server_file_add_callback
+ , web_server_file_del_callback
+ , web_server_file_read_callback
+ , web_server_file_write_callback
+ , (void *) w
+ );
+
+ if(fpi)
+ w->pollinfo_filecopy_slot = fpi->slot;
+ else {
+ netdata_log_error("Failed to add filecopy fd. Closing client.");
+ ret = -1;
+ goto cleanup;
+ }
+ }
+ }
+ }
+ else {
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+ }
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+ } else if(unlikely(bytes < 0)) {
+ ret = -1;
+ goto cleanup;
+ } else if (unlikely(bytes == 0)) {
+ if(unlikely(w->ifd == fd && web_client_has_ssl_wait_receive(w)))
+ *events |= POLLIN;
+
+ if(unlikely(w->ofd == fd && web_client_has_ssl_wait_send(w)))
+ *events |= POLLOUT;
+ }
+
+ ret = web_server_check_client_status(w);
+
+cleanup:
+ worker_is_idle();
+ return ret;
+}
+
+static int web_server_snd_callback(POLLINFO *pi, short int *events) {
+ int retval = -1;
+ worker_is_busy(WORKER_JOB_SND_DATA);
+
+ worker_private->sends++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd);
+
+ ssize_t ret = web_client_send(w);
+
+ if(unlikely(ret < 0)) {
+ retval = -1;
+ goto cleanup;
+ }
+
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ retval = web_server_check_client_status(w);
+
+cleanup:
+ worker_is_idle();
+ return retval;
+}
+
+// ----------------------------------------------------------------------------
+// web server worker thread
+
+static void socket_listen_main_static_threaded_worker_cleanup(void *pptr) {
+ worker_private = CLEANUP_FUNCTION_GET_PTR(pptr);
+ if(!worker_private) return;
+
+ netdata_log_info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends",
+ worker_private->connected,
+ worker_private->disconnected,
+ worker_private->max_concurrent,
+ worker_private->receptions,
+ worker_private->sends
+ );
+
+ worker_private->running = 0;
+ worker_unregister();
+}
+
+static bool web_server_should_stop(void) {
+ return !service_running(SERVICE_WEB_SERVER);
+}
+
+void *socket_listen_main_static_threaded_worker(void *ptr) {
+ worker_private = ptr;
+ worker_private->running = 1;
+ worker_register("WEB");
+ worker_register_job_name(WORKER_JOB_ADD_CONNECTION, "connect");
+ worker_register_job_name(WORKER_JOB_DEL_COLLECTION, "disconnect");
+ worker_register_job_name(WORKER_JOB_ADD_FILE, "file start");
+ worker_register_job_name(WORKER_JOB_DEL_FILE, "file end");
+ worker_register_job_name(WORKER_JOB_READ_FILE, "file read");
+ worker_register_job_name(WORKER_JOB_WRITE_FILE, "file write");
+ worker_register_job_name(WORKER_JOB_RCV_DATA, "receive");
+ worker_register_job_name(WORKER_JOB_SND_DATA, "send");
+ worker_register_job_name(WORKER_JOB_PROCESS, "process");
+
+ CLEANUP_FUNCTION_REGISTER(socket_listen_main_static_threaded_worker_cleanup) cleanup_ptr = worker_private;
+ poll_events(&api_sockets
+ , web_server_add_callback
+ , web_server_del_callback
+ , web_server_rcv_callback
+ , web_server_snd_callback
+ , NULL
+ , web_server_should_stop
+ , web_allow_connections_from
+ , web_allow_connections_dns
+ , NULL
+ , web_client_first_request_timeout
+ , web_client_timeout
+ , default_rrd_update_every * 1000 // timer_milliseconds
+ , ptr // timer_data
+ , worker_private->max_sockets
+ );
+
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// web server main thread - also becomes a worker
+
+static void socket_listen_main_static_threaded_cleanup(void *pptr) {
+ struct netdata_static_thread *static_thread = CLEANUP_FUNCTION_GET_PTR(pptr);
+ if(!static_thread) return;
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+// int i, found = 0;
+// usec_t max = 2 * USEC_PER_SEC, step = 50000;
+//
+// // we start from 1, - 0 is self
+// for(i = 1; i < static_threaded_workers_count; i++) {
+// if(static_workers_private_data[i].running) {
+// found++;
+// netdata_log_info("stopping worker %d", i + 1);
+// nd_thread_signal_cancel(static_workers_private_data[i].thread);
+// }
+// else
+// netdata_log_info("found stopped worker %d", i + 1);
+// }
+//
+// while(found && max > 0) {
+// max -= step;
+// netdata_log_info("Waiting %d static web threads to finish...", found);
+// sleep_usec(step);
+// found = 0;
+//
+// // we start from 1, - 0 is self
+// for(i = 1; i < static_threaded_workers_count; i++) {
+// if (static_workers_private_data[i].running)
+// found++;
+// }
+// }
+//
+// if(found)
+// netdata_log_error("%d static web threads are taking too long to finish. Giving up.", found);
+
+ netdata_log_info("closing all web server sockets...");
+ listen_sockets_close(&api_sockets);
+
+ netdata_log_info("all static web threads stopped.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_static_threaded(void *ptr) {
+ CLEANUP_FUNCTION_REGISTER(socket_listen_main_static_threaded_cleanup) cleanup_ptr = ptr;
+ web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+#ifdef ENABLE_HTTPS
+ netdata_ssl_validate_certificate = !config_get_boolean(CONFIG_SECTION_WEB, "ssl skip certificate verification", !netdata_ssl_validate_certificate);
+
+    if(!netdata_ssl_validate_certificate)
+        netdata_log_info("SSL: web server will skip SSL certificate verification.");
+
+ netdata_ssl_initialize_ctx(NETDATA_SSL_WEB_SERVER_CTX);
+#endif
+
+    // 6 threads is the optimal value,
+    // since browsers typically open up to 6 parallel connections to a server.
+    // So, if the machine has more CPUs, avoid using resources unnecessarily.
+ int def_thread_count = MIN(get_netdata_cpus(), 6);
+
+ if (!strcmp(config_get(CONFIG_SECTION_WEB, "mode", ""),"single-threaded")) {
+ netdata_log_info("Running web server with one thread, because mode is single-threaded");
+ config_set(CONFIG_SECTION_WEB, "mode", "static-threaded");
+ def_thread_count = 1;
+ }
+ static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
+
+ if (static_threaded_workers_count < 1) static_threaded_workers_count = 1;
+
+#ifdef ENABLE_HTTPS
+ // See https://github.com/netdata/netdata/issues/11081#issuecomment-831998240 for more details
+ if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) {
+ static_threaded_workers_count = 1;
+        netdata_log_info("You are running an OpenSSL version older than 1.1.0, so the web server will not enable multithreading.");
+ }
+#endif
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets",
+ (long long int)(rlimit_nofile.rlim_cur / 4));
+
+ static_workers_private_data = callocz((size_t)static_threaded_workers_count,
+ sizeof(struct web_server_static_threaded_worker));
+
+ int i;
+ for (i = 1; i < static_threaded_workers_count; i++) {
+ static_workers_private_data[i].id = i;
+ static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count;
+
+ char tag[50 + 1];
+ snprintfz(tag, sizeof(tag) - 1, "WEB[%d]", i+1);
+
+ netdata_log_info("starting worker %d", i+1);
+ static_workers_private_data[i].thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT,
+ socket_listen_main_static_threaded_worker,
+ (void *)&static_workers_private_data[i]);
+ }
+
+ // and the main one
+ static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count;
+ socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]);
+
+ return NULL;
+}
diff --git a/src/web/server/static/static-threaded.h b/src/web/server/static/static-threaded.h
new file mode 100644
index 000000000..a8c5335ef
--- /dev/null
+++ b/src/web/server/static/static-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_STATIC_THREADED_H
+#define NETDATA_WEB_SERVER_STATIC_THREADED_H
+
+#include "web/server/web_server.h"
+
+void *socket_listen_main_static_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_STATIC_THREADED_H
diff --git a/src/web/server/web_client.c b/src/web/server/web_client.c
new file mode 100644
index 000000000..27fcf29c7
--- /dev/null
+++ b/src/web/server/web_client.c
@@ -0,0 +1,2081 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_client.h"
+
+// this is an async I/O implementation of the web server request parser
+// it is used by all netdata web servers
+
+int respect_web_browser_do_not_track_policy = 0;
+char *web_x_frame_options = NULL;
+
+int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY;
+
+void web_client_set_conn_tcp(struct web_client *w) {
+ web_client_flags_clear_conn(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_CONN_TCP);
+}
+
+void web_client_set_conn_unix(struct web_client *w) {
+ web_client_flags_clear_conn(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_CONN_UNIX);
+}
+
+void web_client_set_conn_cloud(struct web_client *w) {
+ web_client_flags_clear_conn(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_CONN_CLOUD);
+}
+
+void web_client_set_conn_webrtc(struct web_client *w) {
+ web_client_flags_clear_conn(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_CONN_WEBRTC);
+}
+
+void web_client_reset_permissions(struct web_client *w) {
+ web_client_flags_clear_auth(w);
+ w->access = HTTP_ACCESS_NONE;
+}
+
+void web_client_set_permissions(struct web_client *w, HTTP_ACCESS access, HTTP_USER_ROLE role, WEB_CLIENT_FLAGS auth) {
+ web_client_reset_permissions(w);
+ web_client_flag_set(w, auth & WEB_CLIENT_FLAG_ALL_AUTHS);
+ w->access = access;
+ w->user_role = role;
+}
+
+inline int web_client_permission_denied_acl(struct web_client *w) {
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "You need to be authorized to access this resource");
+ w->response.code = HTTP_RESP_UNAVAILABLE_FOR_LEGAL_REASONS;
+ return HTTP_RESP_UNAVAILABLE_FOR_LEGAL_REASONS;
+}
+
+inline int web_client_permission_denied(struct web_client *w) {
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+
+ if(w->access & HTTP_ACCESS_SIGNED_ID)
+ buffer_strcat(w->response.data,
+ "You don't have enough permissions to access this resource");
+ else
+ buffer_strcat(w->response.data,
+ "You need to be authorized to access this resource");
+
+ w->response.code = HTTP_ACCESS_PERMISSION_DENIED_HTTP_CODE(w->access);
+ return w->response.code;
+}
+
+inline int web_client_service_unavailable(struct web_client *w) {
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "This service is currently unavailable.");
+ w->response.code = HTTP_RESP_SERVICE_UNAVAILABLE;
+ return HTTP_RESP_SERVICE_UNAVAILABLE;
+}
+
+static inline int bad_request_multiple_dashboard_versions(struct web_client *w) {
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Multiple dashboard versions given at the URL.");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ return HTTP_RESP_BAD_REQUEST;
+}
+
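+// TCP_CORK (Linux only) holds back partial packets so the response header and body
+// are coalesced into as few packets as possible; web_client_uncork_socket() removes
+// it again once the response has been queued.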
+static inline int web_client_cork_socket(struct web_client *w __maybe_unused) {
+#ifdef TCP_CORK
+ if(likely(web_client_check_conn_tcp(w) && !w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = true;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ netdata_log_error("%llu: failed to enable TCP_CORK on socket.", w->id);
+
+ w->tcp_cork = false;
+ return -1;
+ }
+ }
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+#ifdef ENABLE_HTTPS
+static inline void web_client_enable_wait_from_ssl(struct web_client *w) {
+ if (w->ssl.ssl_errno == SSL_ERROR_WANT_READ)
+ web_client_enable_ssl_wait_receive(w);
+ else if (w->ssl.ssl_errno == SSL_ERROR_WANT_WRITE)
+ web_client_enable_ssl_wait_send(w);
+ else {
+ web_client_disable_ssl_wait_receive(w);
+ web_client_disable_ssl_wait_send(w);
+ }
+}
+#endif
+
+static inline int web_client_uncork_socket(struct web_client *w __maybe_unused) {
+#ifdef TCP_CORK
+ if(likely(w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = false;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ netdata_log_error("%llu: failed to disable TCP_CORK on socket.", w->id);
+ w->tcp_cork = true;
+ return -1;
+ }
+ }
+#endif /* TCP_CORK */
+
+ w->tcp_cork = false;
+ return 0;
+}
+
+static inline char *strip_control_characters(char *url) {
+ if(!url) return "";
+
+ for(char *s = url; *s ;s++)
+ if(iscntrl((uint8_t)*s)) *s = ' ';
+
+ return url;
+}
+
+static void web_client_reset_allocations(struct web_client *w, bool free_all) {
+
+ if(free_all) {
+ // the web client is to be destroyed
+
+ buffer_free(w->url_as_received);
+ w->url_as_received = NULL;
+
+ buffer_free(w->url_path_decoded);
+ w->url_path_decoded = NULL;
+
+ buffer_free(w->url_query_string_decoded);
+ w->url_query_string_decoded = NULL;
+
+ buffer_free(w->response.header_output);
+ w->response.header_output = NULL;
+
+ buffer_free(w->response.header);
+ w->response.header = NULL;
+
+ buffer_free(w->response.data);
+ w->response.data = NULL;
+
+ buffer_free(w->payload);
+ w->payload = NULL;
+ }
+ else {
+ // the web client is to be re-used
+
+ buffer_reset(w->url_as_received);
+ buffer_reset(w->url_path_decoded);
+ buffer_reset(w->url_query_string_decoded);
+
+ buffer_reset(w->response.header_output);
+ buffer_reset(w->response.header);
+ buffer_reset(w->response.data);
+
+ if(w->payload)
+ buffer_reset(w->payload);
+
+ // to add more items here,
+ // web_client_reuse_from_cache() needs to be adjusted to maintain them
+ }
+
+ freez(w->server_host);
+ w->server_host = NULL;
+
+ freez(w->forwarded_host);
+ w->forwarded_host = NULL;
+
+ freez(w->forwarded_for);
+ w->forwarded_for = NULL;
+
+ freez(w->origin);
+ w->origin = NULL;
+
+ freez(w->user_agent);
+ w->user_agent = NULL;
+
+ freez(w->auth_bearer_token);
+ w->auth_bearer_token = NULL;
+
+ // if we had enabled compression, release it
+ if(w->response.zinitialized) {
+ deflateEnd(&w->response.zstream);
+ w->response.zsent = 0;
+ w->response.zhave = 0;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_in = 0;
+ w->response.zstream.total_out = 0;
+ w->response.zinitialized = false;
+ web_client_flag_clear(w, WEB_CLIENT_CHUNKED_TRANSFER);
+ }
+
+ memset(w->transaction, 0, sizeof(w->transaction));
+ web_client_reset_permissions(w);
+ web_client_flag_clear(w, WEB_CLIENT_ENCODING_GZIP|WEB_CLIENT_ENCODING_DEFLATE);
+ web_client_reset_path_flags(w);
+}
+
+void web_client_log_completed_request(struct web_client *w, bool update_web_stats) {
+ struct timeval tv;
+ now_monotonic_high_precision_timeval(&tv);
+
+ size_t size = (w->mode == HTTP_REQUEST_MODE_FILECOPY) ? w->response.rlen : w->response.data->len;
+ size_t sent = w->response.zoutput ? (size_t)w->response.zstream.total_out : size;
+
+ if(update_web_stats)
+ global_statistics_web_request_completed(dt_usec(&tv, &w->timings.tv_in),
+ w->statistics.received_bytes,
+ w->statistics.sent_bytes,
+ size,
+ sent);
+
+ usec_t prep_ut = w->timings.tv_ready.tv_sec ? dt_usec(&w->timings.tv_ready, &w->timings.tv_in) : 0;
+ usec_t sent_ut = w->timings.tv_ready.tv_sec ? dt_usec(&tv, &w->timings.tv_ready) : 0;
+ usec_t total_ut = dt_usec(&tv, &w->timings.tv_in);
+ strip_control_characters((char *)buffer_tostring(w->url_as_received));
+
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_U64(NDF_CONNECTION_ID, w->id),
+ ND_LOG_FIELD_UUID(NDF_TRANSACTION_ID, &w->transaction),
+ ND_LOG_FIELD_TXT(NDF_NIDL_NODE, w->client_host),
+ ND_LOG_FIELD_TXT(NDF_REQUEST_METHOD, HTTP_REQUEST_MODE_2str(w->mode)),
+ ND_LOG_FIELD_BFR(NDF_REQUEST, w->url_as_received),
+ ND_LOG_FIELD_U64(NDF_RESPONSE_CODE, w->response.code),
+ ND_LOG_FIELD_U64(NDF_RESPONSE_SENT_BYTES, sent),
+ ND_LOG_FIELD_U64(NDF_RESPONSE_SIZE_BYTES, size),
+ ND_LOG_FIELD_U64(NDF_RESPONSE_PREPARATION_TIME_USEC, prep_ut),
+ ND_LOG_FIELD_U64(NDF_RESPONSE_SENT_TIME_USEC, sent_ut),
+ ND_LOG_FIELD_U64(NDF_RESPONSE_TOTAL_TIME_USEC, total_ut),
+ ND_LOG_FIELD_TXT(NDF_SRC_IP, w->client_ip),
+ ND_LOG_FIELD_TXT(NDF_SRC_PORT, w->client_port),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_FOR, w->forwarded_for),
+ ND_LOG_FIELD_UUID(NDF_ACCOUNT_ID, &w->auth.cloud_account_id),
+ ND_LOG_FIELD_TXT(NDF_USER_NAME, w->auth.client_name),
+ ND_LOG_FIELD_TXT(NDF_USER_ROLE, http_id2user_role(w->user_role)),
+ ND_LOG_FIELD_CB(NDF_USER_ACCESS, log_cb_http_access_to_hex, &w->access),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ ND_LOG_FIELD_PRIORITY prio = NDLP_INFO;
+ if(w->response.code >= 500)
+ prio = NDLP_EMERG;
+ else if(w->response.code >= 400)
+ prio = NDLP_WARNING;
+ else if(w->response.code >= 300)
+ prio = NDLP_NOTICE;
+
+ // cleanup progress
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PROGRESS_TRACKING)) {
+ web_client_flag_clear(w, WEB_CLIENT_FLAG_PROGRESS_TRACKING);
+ query_progress_finished(&w->transaction, 0, w->response.code, total_ut, size, sent);
+ }
+
+ // access log
+ if(likely(buffer_strlen(w->url_as_received)))
+ nd_log(NDLS_ACCESS, prio, NULL);
+}
+
+void web_client_request_done(struct web_client *w) {
+ web_client_uncork_socket(w);
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Resetting client.", w->id);
+
+ web_client_log_completed_request(w, true);
+
+ if(unlikely(w->mode == HTTP_REQUEST_MODE_FILECOPY)) {
+ if(w->ifd != w->ofd) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Closing filecopy input file descriptor %d.", w->id, w->ifd);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1){
+ close(w->ifd);
+ }
+ }
+
+ w->ifd = w->ofd;
+ }
+ }
+
+ web_client_reset_allocations(w, false);
+
+ w->mode = HTTP_REQUEST_MODE_GET;
+
+ web_client_disable_donottrack(w);
+ web_client_disable_tracking_required(w);
+ web_client_disable_keepalive(w);
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+
+ w->response.has_cookies = false;
+ w->response.rlen = 0;
+ w->response.sent = 0;
+ w->response.code = 0;
+ w->response.zoutput = false;
+
+ w->statistics.received_bytes = 0;
+ w->statistics.sent_bytes = 0;
+}
+
+static int append_slash_to_url_and_redirect(struct web_client *w) {
+ // this function returns a relative redirect
+ // it finds the last path component on the URL and just appends / to it
+ //
+ // So, if the URL is:
+ //
+ // /path/to/file?query_string
+ //
+ // It adds a Location header like this:
+ //
+ // Location: file/?query_string\r\n
+ //
+ // The web browser already knows that it is inside /path/to/
+ // so it converts the path to /path/to/file/ and executes the
+ // request again.
+
+ buffer_strcat(w->response.header, "Location: ");
+ const char *b = buffer_tostring(w->url_as_received);
+ const char *q = strchr(b, '?');
+ if(q && q > b) {
+ const char *e = q - 1;
+ while(e > b && *e != '/') e--;
+ if(*e == '/') e++;
+
+ size_t len = q - e;
+ buffer_strncat(w->response.header, e, len);
+ buffer_strncat(w->response.header, "/", 1);
+ buffer_strcat(w->response.header, q);
+ }
+ else {
+ const char *e = &b[buffer_strlen(w->url_as_received) - 1];
+ while(e > b && *e != '/') e--;
+ if(*e == '/') e++;
+
+ buffer_strcat(w->response.header, e);
+ buffer_strncat(w->response.header, "/", 1);
+ }
+
+ buffer_strncat(w->response.header, "\r\n", 2);
+
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data,
+ "<!DOCTYPE html><html>"
+ "<body onload=\"window.location.href = window.location.origin + window.location.pathname + '/' + window.location.search + window.location.hash\">"
+ "Redirecting. In case your browser does not support redirection, please click "
+ "<a onclick=\"window.location.href = window.location.origin + window.location.pathname + '/' + window.location.search + window.location.hash\">here</a>."
+ "</body></html>");
+ return HTTP_RESP_MOVED_PERM;
+}
+
+// Work around a bug in the CMocka library by removing this function during testing.
+#ifndef REMOVE_MYSENDFILE
+
+static inline int dashboard_version(struct web_client *w) {
+ if(!web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_WITH_VERSION))
+ return -1;
+
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_IS_V0))
+ return 0;
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_IS_V1))
+ return 1;
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_IS_V2))
+ return 2;
+
+ return -1;
+}
+
+static bool find_filename_to_serve(const char *filename, char *dst, size_t dst_len, struct stat *statbuf, struct web_client *w, bool *is_dir) {
+ int d_version = dashboard_version(w);
+ bool has_extension = web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_HAS_FILE_EXTENSION);
+
+ int fallback = 0;
+
+ if(has_extension) {
+ if(d_version == -1)
+ snprintfz(dst, dst_len, "%s/%s", netdata_configured_web_dir, filename);
+ else {
+ // check if the filename or directory exists
+ // fallback to the same path without the dashboard version otherwise
+ snprintfz(dst, dst_len, "%s/v%d/%s", netdata_configured_web_dir, d_version, filename);
+ fallback = 1;
+ }
+ }
+ else if(d_version != -1) {
+ if(filename && *filename) {
+ // check if the filename exists
+ // fallback to /vN/index.html otherwise
+ snprintfz(dst, dst_len, "%s/%s", netdata_configured_web_dir, filename);
+ fallback = 2;
+ }
+ else {
+ if(filename && *filename)
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH);
+ snprintfz(dst, dst_len, "%s/v%d", netdata_configured_web_dir, d_version);
+ }
+ }
+ else {
+ // check if filename exists
+ // this is needed to serve {filename}/index.html, in case a user puts a html file into a directory
+ // fallback to /index.html otherwise
+ snprintfz(dst, dst_len, "%s/%s", netdata_configured_web_dir, filename);
+ fallback = 3;
+ }
+
+ if (stat(dst, statbuf) != 0) {
+ if(fallback == 1) {
+ snprintfz(dst, dst_len, "%s/%s", netdata_configured_web_dir, filename);
+ if (stat(dst, statbuf) != 0)
+ return false;
+ }
+ else if(fallback == 2) {
+ if(filename && *filename)
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH);
+ snprintfz(dst, dst_len, "%s/v%d", netdata_configured_web_dir, d_version);
+ if (stat(dst, statbuf) != 0)
+ return false;
+ }
+ else if(fallback == 3) {
+ if(filename && *filename)
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH);
+ snprintfz(dst, dst_len, "%s", netdata_configured_web_dir);
+ if (stat(dst, statbuf) != 0)
+ return false;
+ }
+ else
+ return false;
+ }
+
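+    // when the resolved path is a directory, serve its index.html instead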
+ if((statbuf->st_mode & S_IFMT) == S_IFDIR) {
+ size_t len = strlen(dst);
+ if(len > dst_len - 11)
+ return false;
+
+ strncpyz(&dst[len], "/index.html", dst_len - len);
+
+ if (stat(dst, statbuf) != 0)
+ return false;
+
+ *is_dir = true;
+ }
+
+ return true;
+}
+
+static int mysendfile(struct web_client *w, char *filename) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Looking for file '%s/%s'", w->id, netdata_configured_web_dir, filename);
+
+ if(!http_can_access_dashboard(w))
+ return web_client_permission_denied_acl(w);
+
+ // skip leading slashes
+ while (*filename == '/') filename++;
+
+ // if the filename contains "strange" characters, refuse to serve it
+ char *s;
+ for(s = filename; *s ;s++) {
+ if( !isalnum((uint8_t)*s) && *s != '/' && *s != '.' && *s != '-' && *s != '_') {
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Filename contains invalid characters: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ }
+
+ // if the filename contains a double dot refuse to serve it
+ if(strstr(filename, "..") != 0) {
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Relative filenames are not supported: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ // find the physical file on disk
+ bool is_dir = false;
+ char web_filename[FILENAME_MAX + 1];
+ struct stat statbuf;
+ if(!find_filename_to_serve(filename, web_filename, FILENAME_MAX, &statbuf, w, &is_dir)) {
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "File does not exist, or is not accessible: ");
+ buffer_strcat_htmlescape(w->response.data, web_filename);
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ if(is_dir && !web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH))
+ return append_slash_to_url_and_redirect(w);
+
+ // open the file
+    w->ifd = open(web_filename, O_RDONLY | O_NONBLOCK | O_CLOEXEC);
+ if(w->ifd == -1) {
+ w->ifd = w->ofd;
+
+ if(errno == EBUSY || errno == EAGAIN) {
+ netdata_log_error("%llu: File '%s' is busy, sending 307 Moved Temporarily to force retry.", w->id, web_filename);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_sprintf(w->response.header, "Location: /%s\r\n", filename);
+ buffer_strcat(w->response.data, "File is currently busy, please try again later: ");
+ buffer_strcat_htmlescape(w->response.data, web_filename);
+ return HTTP_RESP_REDIR_TEMP;
+ }
+ else {
+ netdata_log_error("%llu: Cannot open file '%s'.", w->id, web_filename);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Cannot open file: ");
+ buffer_strcat_htmlescape(w->response.data, web_filename);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ }
+
+ sock_setnonblock(w->ifd);
+
+ w->response.data->content_type = contenttype_for_filename(web_filename);
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Sending file '%s' (%"PRId64" bytes, ifd %d, ofd %d).", w->id, web_filename, (int64_t)statbuf.st_size, w->ifd, w->ofd);
+
+ w->mode = HTTP_REQUEST_MODE_FILECOPY;
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+ buffer_flush(w->response.data);
+ buffer_need_bytes(w->response.data, (size_t)statbuf.st_size);
+ w->response.rlen = (size_t)statbuf.st_size;
+#ifdef __APPLE__
+ w->response.data->date = statbuf.st_mtimespec.tv_sec;
+#else
+ w->response.data->date = statbuf.st_mtim.tv_sec;
+#endif
+ buffer_cacheable(w->response.data);
+
+ return HTTP_RESP_OK;
+}
+#endif
+
+static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ return func(host, w, url);
+}
+
+int web_client_api_request(RRDHOST *host, struct web_client *w, char *url_path_fragment) {
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_TXT(NDF_SRC_IP, w->client_ip),
+ ND_LOG_FIELD_TXT(NDF_SRC_PORT, w->client_port),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_HOST, w->forwarded_host),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_FOR, w->forwarded_for),
+ ND_LOG_FIELD_TXT(NDF_NIDL_NODE, w->client_host),
+ ND_LOG_FIELD_TXT(NDF_REQUEST_METHOD, HTTP_REQUEST_MODE_2str(w->mode)),
+ ND_LOG_FIELD_BFR(NDF_REQUEST, w->url_as_received),
+ ND_LOG_FIELD_U64(NDF_CONNECTION_ID, w->id),
+ ND_LOG_FIELD_UUID(NDF_TRANSACTION_ID, &w->transaction),
+ ND_LOG_FIELD_UUID(NDF_ACCOUNT_ID, &w->auth.cloud_account_id),
+ ND_LOG_FIELD_TXT(NDF_USER_NAME, w->auth.client_name),
+ ND_LOG_FIELD_TXT(NDF_USER_ROLE, http_id2user_role(w->user_role)),
+ ND_LOG_FIELD_CB(NDF_USER_ACCESS, log_cb_http_access_to_hex, &w->access),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ if(!web_client_flag_check(w, WEB_CLIENT_FLAG_PROGRESS_TRACKING)) {
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PROGRESS_TRACKING);
+ query_progress_start_or_update(&w->transaction, 0, w->mode, w->acl,
+ buffer_tostring(w->url_as_received),
+ w->payload,
+ w->forwarded_for ? w->forwarded_for : w->client_ip);
+ }
+
+ // get the api version
+ char *tok = strsep_skip_consecutive_separators(&url_path_fragment, "/");
+ if(tok && *tok) {
+ if(strcmp(tok, "v2") == 0)
+ return web_client_api_request_v2(host, w, url_path_fragment);
+ else if(strcmp(tok, "v1") == 0)
+ return web_client_api_request_v1(host, w, url_path_fragment);
+ else {
+ buffer_flush(w->response.data);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Unsupported API version: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ }
+ else {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Which API version?");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
+
+
+/**
+ * Valid Method
+ *
+ * Netdata accepts the standard HTTP methods GET, OPTIONS, POST, PUT and DELETE, plus the internal STREAM method.
+ *
+ * @param w is the structure with the client request
+ * @param s is the start string to parse
+ *
+ * @return it returns the next address to parse in case the method is valid, and NULL otherwise.
+ */
+static inline char *web_client_valid_method(struct web_client *w, char *s) {
+    // is it a valid request?
+ if(!strncmp(s, "GET ", 4)) {
+ s = &s[4];
+ w->mode = HTTP_REQUEST_MODE_GET;
+ }
+ else if(!strncmp(s, "OPTIONS ", 8)) {
+ s = &s[8];
+ w->mode = HTTP_REQUEST_MODE_OPTIONS;
+ }
+ else if(!strncmp(s, "POST ", 5)) {
+ s = &s[5];
+ w->mode = HTTP_REQUEST_MODE_POST;
+ }
+ else if(!strncmp(s, "PUT ", 4)) {
+ s = &s[4];
+ w->mode = HTTP_REQUEST_MODE_PUT;
+ }
+ else if(!strncmp(s, "DELETE ", 7)) {
+ s = &s[7];
+ w->mode = HTTP_REQUEST_MODE_DELETE;
+ }
+ else if(!strncmp(s, "STREAM ", 7)) {
+ s = &s[7];
+
+#ifdef ENABLE_HTTPS
+ if (!SSL_connection(&w->ssl) && http_is_using_ssl_force(w)) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+
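+            // best effort: extract the child's hostname from the STREAM request's
+            // query string, only to make the error message below more useful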
+ char hostname[256];
+ char *copyme = strstr(s,"hostname=");
+ if ( copyme ){
+ copyme += 9;
+ char *end = strchr(copyme,'&');
+ if(end){
+ size_t length = MIN(255, end - copyme);
+ memcpy(hostname,copyme,length);
+ hostname[length] = 0X00;
+ }
+ else{
+ memcpy(hostname,"not available",13);
+ hostname[13] = 0x00;
+ }
+ }
+ else{
+ memcpy(hostname,"not available",13);
+ hostname[13] = 0x00;
+ }
+            netdata_log_error("The server is configured to always use encrypted connections; please enable SSL on the child with hostname '%s'.", hostname);
+ s = NULL;
+ }
+#endif
+
+ w->mode = HTTP_REQUEST_MODE_STREAM;
+ }
+ else {
+ s = NULL;
+ }
+
+ return s;
+}
+
+/**
+ * Request validate
+ *
+ * @param w is the structure with the client request
+ *
+ * @return It returns HTTP_VALIDATION_OK on success and another code present
+ * in the enum HTTP_VALIDATION otherwise.
+ */
+HTTP_VALIDATION http_request_validate(struct web_client *w) {
+ char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL;
+
+ size_t last_pos = w->header_parse_last_size;
+
+ w->header_parse_tries++;
+ w->header_parse_last_size = buffer_strlen(w->response.data);
+
+ int is_it_valid;
+ if(w->header_parse_tries > 1) {
+ if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n
+ else last_pos = 0;
+
+ if(w->header_parse_last_size < last_pos)
+ last_pos = 0;
+
+ is_it_valid =
+ url_is_request_complete_and_extract_payload(s, &s[last_pos], w->header_parse_last_size, &w->payload);
+ if(!is_it_valid) {
+ if(w->header_parse_tries > HTTP_REQ_MAX_HEADER_FETCH_TRIES) {
+ netdata_log_info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data));
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_TOO_MANY_READ_RETRIES;
+ }
+
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ is_it_valid = 1;
+ } else {
+ last_pos = w->header_parse_last_size;
+ is_it_valid =
+ url_is_request_complete_and_extract_payload(s, &s[last_pos], w->header_parse_last_size, &w->payload);
+ }
+
+ s = web_client_valid_method(w, s);
+ if (!s) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ } else if (!is_it_valid) {
+        // Invalid request: we have more data after the end of the message
+ char *check = strstr((char *)buffer_tostring(w->response.data), "\r\n\r\n");
+ if(check) {
+ check += 4;
+ if (*check) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_EXCESS_REQUEST_DATA;
+ }
+ }
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+    // After the method, we have the path and the query string together
+ encoded_url = s;
+
+    // we search for the position of " HTTP/", because it marks the end of the requested URL
+ s = url_find_protocol(s);
+
+ // incomplete requests
+ if(unlikely(!*s)) {
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ // we have the end of encoded_url - remember it
+ char *ue = s;
+
+ // make sure we have complete request
+ // complete requests contain: \r\n\r\n
+ while(*s) {
+ // find a line feed
+ while(*s && *s++ != '\r');
+
+ // did we reach the end?
+ if(unlikely(!*s)) break;
+
+ // is it \r\n ?
+ if(likely(*s++ == '\n')) {
+
+ // is it again \r\n ? (header end)
+ if(unlikely(*s == '\r' && s[1] == '\n')) {
+ // a valid complete HTTP request found
+
+ char c = *ue;
+ *ue = '\0';
+ web_client_decode_path_and_query_string(w, encoded_url);
+ *ue = c;
+
+#ifdef ENABLE_HTTPS
+ if ( (web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx) ) {
+ if (!w->ssl.conn && (http_is_using_ssl_force(w) || http_is_using_ssl_default(w)) && (w->mode != HTTP_REQUEST_MODE_STREAM)) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_REDIRECT;
+ }
+ }
+#endif
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_OK;
+ }
+
+ // another header line
+ s = http_header_parse_line(w, s);
+ }
+ }
+
+ // incomplete request
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+}
+
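+// send a buffer to the client: over TLS when the connection is negotiated as SSL,
+// otherwise over the plain TCP or UNIX socket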
+static inline ssize_t web_client_send_data(struct web_client *w,const void *buf,size_t len, int flags)
+{
+ ssize_t bytes;
+#ifdef ENABLE_HTTPS
+ if ((web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx)) {
+ if (SSL_connection(&w->ssl)) {
+ bytes = netdata_ssl_write(&w->ssl, buf, len) ;
+ web_client_enable_wait_from_ssl(w);
+ }
+ else
+ bytes = send(w->ofd,buf, len , flags);
+ }
+ else if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w))
+ bytes = send(w->ofd,buf, len , flags);
+ else
+ bytes = -999;
+#else
+ if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w))
+ bytes = send(w->ofd, buf, len, flags);
+ else
+ bytes = -999;
+#endif
+
+ return bytes;
+}
+
+void web_client_build_http_header(struct web_client *w) {
+ if(unlikely(w->response.code != HTTP_RESP_OK))
+ buffer_no_cacheable(w->response.data);
+
+ if(unlikely(!w->response.data->date))
+ w->response.data->date = now_realtime_sec();
+
+ // set a proper expiration date, if not already set
+ if(unlikely(!w->response.data->expires))
+ w->response.data->expires = w->response.data->date +
+ ((w->response.data->options & WB_CONTENT_NO_CACHEABLE) ? 0 : 86400);
+
+ // prepare the HTTP response header
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Generating HTTP header with response %d.", w->id, w->response.code);
+
+ const char *code_msg = http_response_code2string(w->response.code);
+
+ // prepare the last modified and expiration dates
+ char rfc7231_date[RFC7231_MAX_LENGTH], rfc7231_expires[RFC7231_MAX_LENGTH];
+ rfc7231_datetime(rfc7231_date, sizeof(rfc7231_date), w->response.data->date);
+ rfc7231_datetime(rfc7231_expires, sizeof(rfc7231_expires), w->response.data->expires);
+
+ if (w->response.code == HTTP_RESP_HTTPS_UPGRADE) {
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Location: https://%s%s\r\n",
+ w->response.code, code_msg,
+ w->server_host ? w->server_host : "",
+ buffer_tostring(w->url_as_received));
+ w->response.code = HTTP_RESP_MOVED_PERM;
+ }
+ else {
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Connection: %s\r\n"
+ "Server: Netdata Embedded HTTP Server %s\r\n"
+ "Access-Control-Allow-Origin: %s\r\n"
+ "Access-Control-Allow-Credentials: true\r\n"
+ "Date: %s\r\n",
+ w->response.code,
+ code_msg,
+ web_client_has_keepalive(w)?"keep-alive":"close",
+ NETDATA_VERSION,
+ w->origin ? w->origin : "*",
+ rfc7231_date);
+
+ http_header_content_type(w->response.header_output, w->response.data->content_type);
+ }
+
+ if(unlikely(web_x_frame_options))
+ buffer_sprintf(w->response.header_output, "X-Frame-Options: %s\r\n", web_x_frame_options);
+
+ if(w->response.has_cookies) {
+ if(respect_web_browser_do_not_track_policy)
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ }
+ else {
+ if(respect_web_browser_do_not_track_policy) {
+ if(web_client_has_tracking_required(w))
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ else
+ buffer_sprintf(w->response.header_output,
+ "Tk: N\r\n");
+ }
+ }
+
+ if(w->mode == HTTP_REQUEST_MODE_OPTIONS) {
+ buffer_strcat(w->response.header_output,
+ "Access-Control-Allow-Methods: GET, OPTIONS\r\n"
+ "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control, x-auth-token\r\n"
+ "Access-Control-Max-Age: 1209600\r\n" // 86400 * 14
+ );
+ }
+ else {
+ buffer_sprintf(w->response.header_output,
+ "Cache-Control: %s\r\n"
+ "Expires: %s\r\n",
+ (w->response.data->options & WB_CONTENT_NO_CACHEABLE)?"no-cache, no-store, must-revalidate\r\nPragma: no-cache":"public",
+ rfc7231_expires);
+ }
+
+ // copy a possibly available custom header
+ if(unlikely(buffer_strlen(w->response.header)))
+ buffer_strcat(w->response.header_output, buffer_tostring(w->response.header));
+
+ // headers related to the transfer method
+ if(likely(w->response.zoutput))
+ buffer_strcat(w->response.header_output, "Content-Encoding: gzip\r\n");
+
+ if(likely(w->flags & WEB_CLIENT_CHUNKED_TRANSFER))
+ buffer_strcat(w->response.header_output, "Transfer-Encoding: chunked\r\n");
+ else {
+ if(likely((w->response.data->len || w->response.rlen))) {
+ // we know the content length, put it
+ buffer_sprintf(w->response.header_output, "Content-Length: %zu\r\n", w->response.data->len? w->response.data->len: w->response.rlen);
+ }
+ else {
+ // we don't know the content length, disable keep-alive
+ web_client_disable_keepalive(w);
+ }
+ }
+
+ char uuid[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(w->transaction, uuid);
+ buffer_sprintf(w->response.header_output,
+ "X-Transaction-ID: %s\r\n", uuid);
+
+ // end of HTTP header
+ buffer_strcat(w->response.header_output, "\r\n");
+}
+
+static inline void web_client_send_http_header(struct web_client *w) {
+ web_client_build_http_header(w);
+
+ // sent the HTTP header
+ netdata_log_debug(D_WEB_DATA, "%llu: Sending response HTTP header of size %zu: '%s'"
+ , w->id
+ , buffer_strlen(w->response.header_output)
+ , buffer_tostring(w->response.header_output)
+ );
+
+ web_client_cork_socket(w);
+
+ size_t count = 0;
+ ssize_t bytes;
+#ifdef ENABLE_HTTPS
+ if ( (web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx) ) {
+ if (SSL_connection(&w->ssl)) {
+ bytes = netdata_ssl_write(&w->ssl, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output));
+ web_client_enable_wait_from_ssl(w);
+ }
+ else {
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ netdata_log_error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+ }
+ else if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w)) {
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ netdata_log_error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+ else
+ bytes = -999;
+#else
+ if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w)) {
+ while ((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if (count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ netdata_log_error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+ else
+ bytes = -999;
+#endif
+
+ if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) {
+ if(bytes > 0)
+ w->statistics.sent_bytes += bytes;
+
+ if (bytes < 0) {
+ netdata_log_error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
+ , buffer_strlen(w->response.header_output)
+ , bytes);
+
+ WEB_CLIENT_IS_DEAD(w);
+ return;
+ }
+ }
+ else
+ w->statistics.sent_bytes += bytes;
+}
+
+static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, char *url, bool nodeid, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ static uint32_t hash_localhost = 0;
+
+ if(unlikely(!hash_localhost)) {
+ hash_localhost = simple_hash("localhost");
+ }
+
+ if(host != localhost) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Nesting of hosts is not allowed.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ char *tok = strsep_skip_consecutive_separators(&url, "/");
+ if(tok && *tok) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Searching for host with name '%s'.", w->id, tok);
+
+ if(nodeid) {
+ host = find_host_by_node_id(tok);
+ if(!host) {
+ host = rrdhost_find_by_hostname(tok);
+ if (!host)
+ host = rrdhost_find_by_guid(tok);
+ }
+ }
+ else {
+ host = rrdhost_find_by_hostname(tok);
+ if(!host) {
+ host = rrdhost_find_by_guid(tok);
+ if (!host)
+ host = find_host_by_node_id(tok);
+ }
+ }
+
+ if(!host) {
+ // we didn't find it, but it may be a uuid case mismatch for MACHINE_GUID
+ // so, recreate the machine guid in lower-case.
+ nd_uuid_t uuid;
+ char txt[UUID_STR_LEN];
+ if (uuid_parse(tok, uuid) == 0) {
+ uuid_unparse_lower(uuid, txt);
+ host = rrdhost_find_by_guid(txt);
+ }
+ }
+
+ if (host) {
+ if(!url)
+ //no delim found
+ return append_slash_to_url_and_redirect(w);
+
+ size_t len = strlen(url) + 2;
+ char buf[len];
+ buf[0] = '/';
+ strcpy(&buf[1], url);
+ buf[len - 1] = '\0';
+
+ buffer_flush(w->url_path_decoded);
+ buffer_strcat(w->url_path_decoded, buf);
+ return func(host, w, buf);
+ }
+ }
+
+ buffer_flush(w->response.data);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "This netdata does not maintain a database for host: ");
+ buffer_strcat_htmlescape(w->response.data, tok?tok:"");
+ return HTTP_RESP_NOT_FOUND;
+}
+
+int web_client_api_request_with_node_selection(RRDHOST *host, struct web_client *w, char *decoded_url_path) {
+ // entry point for all API requests
+
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_TXT(NDF_REQUEST_METHOD, HTTP_REQUEST_MODE_2str(w->mode)),
+ ND_LOG_FIELD_BFR(NDF_REQUEST, w->url_as_received),
+ ND_LOG_FIELD_U64(NDF_CONNECTION_ID, w->id),
+ ND_LOG_FIELD_UUID(NDF_TRANSACTION_ID, &w->transaction),
+ ND_LOG_FIELD_UUID(NDF_ACCOUNT_ID, &w->auth.cloud_account_id),
+ ND_LOG_FIELD_TXT(NDF_USER_NAME, w->auth.client_name),
+ ND_LOG_FIELD_TXT(NDF_USER_ROLE, http_id2user_role(w->user_role)),
+ ND_LOG_FIELD_CB(NDF_USER_ACCESS, log_cb_http_access_to_hex, &w->access),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ // give a new transaction id to the request
+ if(uuid_is_null(w->transaction))
+ uuid_generate_random(w->transaction);
+
+ static uint32_t
+ hash_api = 0,
+ hash_host = 0,
+ hash_node = 0;
+
+ if(unlikely(!hash_api)) {
+ hash_api = simple_hash("api");
+ hash_host = simple_hash("host");
+ hash_node = simple_hash("node");
+ }
+
+ char *tok = strsep_skip_consecutive_separators(&decoded_url_path, "/?");
+ if(likely(tok && *tok)) {
+ uint32_t hash = simple_hash(tok);
+
+ if(unlikely(hash == hash_api && strcmp(tok, "api") == 0)) {
+ // current API
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: API request ...", w->id);
+ return check_host_and_call(host, w, decoded_url_path, web_client_api_request);
+ }
+ else if(unlikely((hash == hash_host && strcmp(tok, "host") == 0) || (hash == hash_node && strcmp(tok, "node") == 0))) {
+ // host switching
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: host switch request ...", w->id);
+ return web_client_switch_host(host, w, decoded_url_path, hash == hash_node, web_client_api_request_with_node_selection);
+ }
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Unknown API endpoint.");
+ w->response.data->content_type = CT_TEXT_HTML;
+ return HTTP_RESP_NOT_FOUND;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *decoded_url_path) {
+ if(unlikely(!service_running(ABILITY_WEB_REQUESTS)))
+ return web_client_service_unavailable(w);
+
+ static uint32_t
+ hash_api = 0,
+ hash_netdata_conf = 0,
+ hash_host = 0,
+ hash_node = 0,
+ hash_v0 = 0,
+ hash_v1 = 0,
+ hash_v2 = 0;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ static uint32_t hash_exit = 0, hash_debug = 0, hash_mirror = 0;
+#endif
+
+ if(unlikely(!hash_api)) {
+ hash_api = simple_hash("api");
+ hash_netdata_conf = simple_hash("netdata.conf");
+ hash_host = simple_hash("host");
+ hash_node = simple_hash("node");
+ hash_v0 = simple_hash("v0");
+ hash_v1 = simple_hash("v1");
+ hash_v2 = simple_hash("v2");
+#ifdef NETDATA_INTERNAL_CHECKS
+ hash_exit = simple_hash("exit");
+ hash_debug = simple_hash("debug");
+ hash_mirror = simple_hash("mirror");
+#endif
+ }
+
+ // keep a copy of the decoded path, in case we need to serve it as a filename
+ char filename[FILENAME_MAX + 1];
+ strncpyz(filename, decoded_url_path ? decoded_url_path : "", FILENAME_MAX);
+
+ char *tok = strsep_skip_consecutive_separators(&decoded_url_path, "/?");
+ if(likely(tok && *tok)) {
+ uint32_t hash = simple_hash(tok);
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Processing command '%s'.", w->id, tok);
+
+ if(likely(hash == hash_api && strcmp(tok, "api") == 0)) { // current API
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: API request ...", w->id);
+ return check_host_and_call(host, w, decoded_url_path, web_client_api_request);
+ }
+ else if(unlikely((hash == hash_host && strcmp(tok, "host") == 0) || (hash == hash_node && strcmp(tok, "node") == 0))) { // host switching
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: host switch request ...", w->id);
+ return web_client_switch_host(host, w, decoded_url_path, hash == hash_node, web_client_process_url);
+ }
+ else if(unlikely(hash == hash_v2 && strcmp(tok, "v2") == 0)) {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_WITH_VERSION))
+ return bad_request_multiple_dashboard_versions(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_IS_V2);
+ return web_client_process_url(host, w, decoded_url_path);
+ }
+ else if(unlikely(hash == hash_v1 && strcmp(tok, "v1") == 0)) {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_WITH_VERSION))
+ return bad_request_multiple_dashboard_versions(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_IS_V1);
+ return web_client_process_url(host, w, decoded_url_path);
+ }
+ else if(unlikely(hash == hash_v0 && strcmp(tok, "v0") == 0)) {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_PATH_WITH_VERSION))
+ return bad_request_multiple_dashboard_versions(w);
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_IS_V0);
+ return web_client_process_url(host, w, decoded_url_path);
+ }
+ else if(unlikely(hash == hash_netdata_conf && strcmp(tok, "netdata.conf") == 0)) { // netdata.conf
+ if(unlikely(!http_can_access_netdataconf(w)))
+ return web_client_permission_denied_acl(w);
+
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: generating netdata.conf ...", w->id);
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ config_generate(w->response.data, 0);
+ return HTTP_RESP_OK;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) {
+ if(unlikely(!http_can_access_netdataconf(w)))
+ return web_client_permission_denied_acl(w);
+
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+
+ if(!netdata_exit)
+ buffer_strcat(w->response.data, "ok, will do...");
+ else
+ buffer_strcat(w->response.data, "I am doing it already");
+
+ netdata_log_error("web request to exit received.");
+ netdata_cleanup_and_exit(0, NULL, NULL, NULL);
+ return HTTP_RESP_OK;
+ }
+ else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) {
+ if(unlikely(!http_can_access_netdataconf(w)))
+ return web_client_permission_denied_acl(w);
+
+ buffer_flush(w->response.data);
+
+ // get the name of the data to show
+ tok = strsep_skip_consecutive_separators(&decoded_url_path, "&");
+ if(tok && *tok) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok);
+
+ // do we have such a data set?
+ RRDSET *st = rrdset_find_byname(host, tok);
+ if(!st) st = rrdset_find(host, tok);
+ if(!st) {
+ w->response.data->content_type = CT_TEXT_HTML;
+                    buffer_strcat(w->response.data, "Chart not found: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok);
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ debug_flags |= D_RRD_STATS;
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))
+ rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);
+ else
+ rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+
+ w->response.data->content_type = CT_TEXT_HTML;
+                    buffer_sprintf(w->response.data, "Chart debug is now %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ return HTTP_RESP_OK;
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "debug which chart?\r\n");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) {
+ if(unlikely(!http_can_access_netdataconf(w)))
+ return web_client_permission_denied_acl(w);
+
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Mirroring...", w->id);
+
+ // replace the zero bytes with spaces
+ buffer_char_replace(w->response.data, '\0', ' ');
+
+ // just leave the buffer as-is
+ // it will be copied back to the client
+
+ return HTTP_RESP_OK;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ buffer_flush(w->response.data);
+ return mysendfile(w, filename);
+}
+
+static bool web_server_log_transport(BUFFER *wb, void *ptr) {
+ struct web_client *w = ptr;
+ if(!w)
+ return false;
+
+#ifdef ENABLE_HTTPS
+ buffer_strcat(wb, SSL_connection(&w->ssl) ? "https" : "http");
+#else
+ buffer_strcat(wb, "http");
+#endif
+ return true;
+}
+
+void web_client_process_request_from_web_server(struct web_client *w) {
+ // entry point for web server requests
+
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_CB(NDF_SRC_TRANSPORT, web_server_log_transport, w),
+ ND_LOG_FIELD_TXT(NDF_SRC_IP, w->client_ip),
+ ND_LOG_FIELD_TXT(NDF_SRC_PORT, w->client_port),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_HOST, w->forwarded_host),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_FOR, w->forwarded_for),
+ ND_LOG_FIELD_TXT(NDF_NIDL_NODE, w->client_host),
+ ND_LOG_FIELD_TXT(NDF_REQUEST_METHOD, HTTP_REQUEST_MODE_2str(w->mode)),
+ ND_LOG_FIELD_BFR(NDF_REQUEST, w->url_as_received),
+ ND_LOG_FIELD_U64(NDF_CONNECTION_ID, w->id),
+ ND_LOG_FIELD_UUID(NDF_TRANSACTION_ID, &w->transaction),
+ ND_LOG_FIELD_UUID(NDF_ACCOUNT_ID, &w->auth.cloud_account_id),
+ ND_LOG_FIELD_TXT(NDF_USER_NAME, w->auth.client_name),
+ ND_LOG_FIELD_TXT(NDF_USER_ROLE, http_id2user_role(w->user_role)),
+ ND_LOG_FIELD_CB(NDF_USER_ACCESS, log_cb_http_access_to_hex, &w->access),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ // give a new transaction id to the request
+ if(uuid_is_null(w->transaction))
+ uuid_generate_random(w->transaction);
+
+ // start timing us
+ web_client_timeout_checkpoint_init(w);
+
+ switch(http_request_validate(w)) {
+ case HTTP_VALIDATION_OK:
+ if(!web_client_flag_check(w, WEB_CLIENT_FLAG_PROGRESS_TRACKING)) {
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PROGRESS_TRACKING);
+ query_progress_start_or_update(&w->transaction, 0, w->mode, w->acl,
+ buffer_tostring(w->url_as_received),
+ w->payload,
+ w->forwarded_for ? w->forwarded_for : w->client_ip);
+ }
+
+ switch(w->mode) {
+ case HTTP_REQUEST_MODE_STREAM:
+ if(unlikely(!http_can_access_stream(w))) {
+ web_client_permission_denied_acl(w);
+ return;
+ }
+
+ w->response.code = rrdpush_receiver_thread_spawn(w, (char *)buffer_tostring(w->url_query_string_decoded), NULL);
+ return;
+
+ case HTTP_REQUEST_MODE_OPTIONS:
+ if(unlikely(
+ !http_can_access_dashboard(w) &&
+ !http_can_access_registry(w) &&
+ !http_can_access_badges(w) &&
+ !http_can_access_mgmt(w) &&
+ !http_can_access_netdataconf(w)
+ )) {
+ web_client_permission_denied_acl(w);
+ break;
+ }
+
+ w->response.data->content_type = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "OK");
+ w->response.code = HTTP_RESP_OK;
+ break;
+
+ case HTTP_REQUEST_MODE_FILECOPY:
+ case HTTP_REQUEST_MODE_POST:
+ case HTTP_REQUEST_MODE_GET:
+ case HTTP_REQUEST_MODE_PUT:
+ case HTTP_REQUEST_MODE_DELETE:
+ if(unlikely(
+ !http_can_access_dashboard(w) &&
+ !http_can_access_registry(w) &&
+ !http_can_access_badges(w) &&
+ !http_can_access_mgmt(w) &&
+ !http_can_access_netdataconf(w)
+ )) {
+ web_client_permission_denied_acl(w);
+ break;
+ }
+
+ web_client_reset_path_flags(w);
+
+ // find if the URL path has a filename extension
+ char path[FILENAME_MAX + 1];
+ strncpyz(path, buffer_tostring(w->url_path_decoded), FILENAME_MAX);
+ char *s = path, *e = path;
+
+ // remove the query string and find the last char
+ for (; *e ; e++) {
+ if (*e == '?')
+ break;
+ }
+
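+                    // an empty path, or a path whose last character before any query string is '/',
+                    // is flagged as having a trailing slash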
+ if(e == s || (*(e - 1) == '/'))
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH);
+
+ // check if there is a filename extension
+ while (--e > s) {
+ if (*e == '/')
+ break;
+ if(*e == '.') {
+ web_client_flag_set(w, WEB_CLIENT_FLAG_PATH_HAS_FILE_EXTENSION);
+ break;
+ }
+ }
+
+ w->response.code = (short)web_client_process_url(localhost, w, path);
+ break;
+
+ default:
+ web_client_permission_denied_acl(w);
+ return;
+ }
+ break;
+
+ case HTTP_VALIDATION_INCOMPLETE:
+ if(w->response.data->len > NETDATA_WEB_REQUEST_MAX_SIZE) {
+ buffer_flush(w->url_as_received);
+ buffer_strcat(w->url_as_received, "too big request");
+
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Received request is too big (%zu bytes).", w->id, w->response.data->len);
+
+ size_t len = w->response.data->len;
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Received request is too big (received %zu bytes, max is %zu bytes).\r\n", len, (size_t)NETDATA_WEB_REQUEST_MAX_SIZE);
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ }
+ else {
+ // wait for more data
+ // set to normal to prevent web_server_rcv_callback
+ // from going into stream mode
+ if (w->mode == HTTP_REQUEST_MODE_STREAM)
+ w->mode = HTTP_REQUEST_MODE_GET;
+ return;
+ }
+ break;
+#ifdef ENABLE_HTTPS
+ case HTTP_VALIDATION_REDIRECT:
+ {
+ buffer_flush(w->response.data);
+ w->response.data->content_type = CT_TEXT_HTML;
+ buffer_strcat(w->response.data,
+ "<!DOCTYPE html><!-- SPDX-License-Identifier: GPL-3.0-or-later --><html>"
+ "<body onload=\"window.location.href ='https://'+ window.location.hostname +"
+ " ':' + window.location.port + window.location.pathname + window.location.search\">"
+                      "Redirecting to a secure connection. In case your browser does not support redirection, please"
+ " click <a onclick=\"window.location.href ='https://'+ window.location.hostname + ':' "
+ " + window.location.port + window.location.pathname + window.location.search\">here</a>."
+ "</body></html>");
+ w->response.code = HTTP_RESP_HTTPS_UPGRADE;
+ break;
+ }
+#endif
+ case HTTP_VALIDATION_MALFORMED_URL:
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Malformed URL '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Malformed URL...\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ case HTTP_VALIDATION_EXCESS_REQUEST_DATA:
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Excess data in request '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Excess data in request.\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ case HTTP_VALIDATION_TOO_MANY_READ_RETRIES:
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: Too many retries to read request '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Too many retries to read request.\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ case HTTP_VALIDATION_NOT_SUPPORTED:
+ netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: HTTP method requested is not supported '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "HTTP method requested is not supported...\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ }
+
+ // keep track of the processing time
+ web_client_timeout_checkpoint_response_ready(w, NULL);
+
+ w->response.sent = 0;
+
+ web_client_send_http_header(w);
+
+ // enable sending immediately if we have data
+ if(w->response.data->len) web_client_enable_wait_send(w);
+ else web_client_disable_wait_send(w);
+
+ switch(w->mode) {
+ case HTTP_REQUEST_MODE_STREAM:
+ netdata_log_debug(D_WEB_CLIENT, "%llu: STREAM done.", w->id);
+ break;
+
+ case HTTP_REQUEST_MODE_OPTIONS:
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Done preparing the OPTIONS response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case HTTP_REQUEST_MODE_POST:
+ case HTTP_REQUEST_MODE_GET:
+ case HTTP_REQUEST_MODE_PUT:
+ case HTTP_REQUEST_MODE_DELETE:
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Done preparing the response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case HTTP_REQUEST_MODE_FILECOPY:
+ if(w->response.rlen) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending data file of %zu bytes to client.", w->id, w->response.rlen);
+ web_client_enable_wait_receive(w);
+
+ /*
+ // utilize the kernel sendfile() for copying the file to the socket.
+ // this block of code can be commented, without anything missing.
+ // when it is commented, the program will copy the data using async I/O.
+ {
+ long len = sendfile(w->ofd, w->ifd, NULL, w->response.data->rbytes);
+ if(len != w->response.data->rbytes)
+ netdata_log_error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len);
+ else
+ web_client_request_done(w);
+ }
+ */
+ }
+ else
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending an unknown amount of bytes to client.", w->id);
+ break;
+
+ default:
+ fatal("%llu: Unknown client mode %u.", w->id, w->mode);
+ break;
+ }
+}
+
+ssize_t web_client_send_chunk_header(struct web_client *w, size_t len)
+{
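+    // HTTP/1.1 chunked transfer encoding: each chunk is sent as its size in hex
+    // followed by CRLF, then the chunk data, then CRLF; a final zero-sized chunk
+    // ("0\r\n\r\n") terminates the body (see web_client_send_chunk_finalize())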
+ netdata_log_debug(D_DEFLATE, "%llu: OPEN CHUNK of %zu bytes (hex: %zx).", w->id, len, len);
+ char buf[24];
+ ssize_t bytes;
+ bytes = (ssize_t)sprintf(buf, "%zX\r\n", len);
+ buf[bytes] = 0x00;
+
+ bytes = web_client_send_data(w,buf,strlen(buf),0);
+ if(bytes > 0) {
+ netdata_log_debug(D_DEFLATE, "%llu: Sent chunk header %zd bytes.", w->id, bytes);
+ w->statistics.sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Did not send chunk header to the client.", w->id);
+ }
+ else {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Failed to send chunk header to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_close(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: CLOSE CHUNK.", w->id);
+
+ ssize_t bytes;
+ bytes = web_client_send_data(w,"\r\n",2,0);
+ if(bytes > 0) {
+ netdata_log_debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->statistics.sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Did not send chunk suffix to the client.", w->id);
+ }
+ else {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Failed to send chunk suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_finalize(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: FINALIZE CHUNK.", w->id);
+
+ ssize_t bytes;
+ bytes = web_client_send_data(w,"\r\n0\r\n\r\n",7,0);
+ if(bytes > 0) {
+ netdata_log_debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->statistics.sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Did not send chunk finalize suffix to the client.", w->id);
+ }
+ else {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Failed to send chunk finalize suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_deflate(struct web_client *w)
+{
+ ssize_t len = 0, t = 0;
+
+ // when using compression,
+    // w->response.sent is the number of bytes passed through the compressor
+
+ netdata_log_debug(D_DEFLATE, "%llu: web_client_send_deflate(): w->response.data->len = %zu, w->response.sent = %zu, w->response.zhave = %zu, w->response.zsent = %zu, w->response.zstream.avail_in = %u, w->response.zstream.avail_out = %u, w->response.zstream.total_in = %lu, w->response.zstream.total_out = %lu.",
+ w->id, w->response.data->len, w->response.sent, w->response.zhave, w->response.zsent, w->response.zstream.avail_in, w->response.zstream.avail_out, w->response.zstream.total_in, w->response.zstream.total_out);
+
+ if(w->response.data->len - w->response.sent == 0 && w->response.zstream.avail_in == 0 && w->response.zhave == w->response.zsent && w->response.zstream.avail_out != 0) {
+ // there is nothing to send
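+        // i.e. all buffered response data has been handed to zlib, zlib has consumed
+        // its input, every compressed byte produced so far has been sent, and the
+        // last deflate() call did not fill its output buffer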
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // finalize the chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_finalize(w);
+ if(t < 0) return t;
+ }
+
+ if(w->mode == HTTP_REQUEST_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return t;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return t;
+ }
+
+ // reset the client
+ web_client_request_done(w);
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Done sending all data on socket.", w->id);
+ return t;
+ }
+
+ if(w->response.zhave == w->response.zsent) {
+ // compress more input data
+
+ // close the previous open chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_close(w);
+ if(t < 0) return t;
+ }
+
+ netdata_log_debug(D_DEFLATE, "%llu: Compressing %zu new bytes starting from %zu (and %u left behind).", w->id, (w->response.data->len - w->response.sent), w->response.sent, w->response.zstream.avail_in);
+
+ // give the compressor all the data not passed through the compressor yet
+ if(w->response.data->len > w->response.sent) {
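+            // rewind next_in by avail_in, so any input zlib has not consumed yet
+            // is offered again together with the newly buffered bytes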
+ w->response.zstream.next_in = (Bytef *)&w->response.data->buffer[w->response.sent - w->response.zstream.avail_in];
+ w->response.zstream.avail_in += (uInt) (w->response.data->len - w->response.sent);
+ }
+
+ // reset the compressor output buffer
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE;
+
+ // ask for FINISH if we have all the input
+ int flush = Z_SYNC_FLUSH;
+ if((w->mode == HTTP_REQUEST_MODE_GET || w->mode == HTTP_REQUEST_MODE_POST || w->mode == HTTP_REQUEST_MODE_PUT || w->mode == HTTP_REQUEST_MODE_DELETE)
+ || (w->mode == HTTP_REQUEST_MODE_FILECOPY && !web_client_has_wait_receive(w) && w->response.data->len == w->response.rlen)) {
+ flush = Z_FINISH;
+ netdata_log_debug(D_DEFLATE, "%llu: Requesting Z_FINISH, if possible.", w->id);
+ }
+ else {
+ netdata_log_debug(D_DEFLATE, "%llu: Requesting Z_SYNC_FLUSH.", w->id);
+ }
+
+ // compress
+ if(deflate(&w->response.zstream, flush) == Z_STREAM_ERROR) {
+ netdata_log_error("%llu: Compression failed. Closing down client.", w->id);
+ web_client_request_done(w);
+ return(-1);
+ }
+
+ w->response.zhave = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out;
+ w->response.zsent = 0;
+
+ // keep track of the bytes passed through the compressor
+ w->response.sent = w->response.data->len;
+
+ netdata_log_debug(D_DEFLATE, "%llu: Compression produced %zu bytes.", w->id, w->response.zhave);
+
+ // open a new chunk
+ ssize_t t2 = web_client_send_chunk_header(w, w->response.zhave);
+ if(t2 < 0) return t2;
+ t += t2;
+ }
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Sending %zu bytes of data (+%zd of chunk header).", w->id, w->response.zhave - w->response.zsent, t);
+
+ len = web_client_send_data(w,&w->response.zbuffer[w->response.zsent], (size_t) (w->response.zhave - w->response.zsent), MSG_DONTWAIT);
+ if(len > 0) {
+ w->statistics.sent_bytes += len;
+ w->response.zsent += len;
+ len += t;
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, len);
+ }
+ else if(len == 0) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client (zhave = %zu, zsent = %zu, need to send = %zu).",
+ w->id, w->response.zhave, w->response.zsent, w->response.zhave - w->response.zsent);
+
+ }
+ else {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(len);
+}
+
+ssize_t web_client_send(struct web_client *w) {
+ if(likely(w->response.zoutput)) return web_client_send_deflate(w);
+
+ ssize_t bytes;
+
+ if(unlikely(w->response.data->len - w->response.sent == 0)) {
+ // there is nothing to send
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // there can be two cases for this
+ // A. we have done everything
+ // B. we temporarily have nothing to send, waiting for the buffer to be filled by ifd
+
+ if(w->mode == HTTP_REQUEST_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return 0;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return 0;
+ }
+
+ web_client_request_done(w);
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Done sending all data on socket. Waiting for next request on the same socket.", w->id);
+ return 0;
+ }
+
+ bytes = web_client_send_data(w,&w->response.data->buffer[w->response.sent], w->response.data->len - w->response.sent, MSG_DONTWAIT);
+ if(likely(bytes > 0)) {
+ w->statistics.sent_bytes += bytes;
+ w->response.sent += bytes;
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, bytes);
+ }
+ else if(likely(bytes == 0)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client.", w->id);
+ }
+ else {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_read_file(struct web_client *w)
+{
+ if(unlikely(w->response.rlen > w->response.data->size))
+ buffer_need_bytes(w->response.data, w->response.rlen - w->response.data->size);
+
+ if(unlikely(w->response.rlen <= w->response.data->len))
+ return 0;
+
+ ssize_t left = (ssize_t)(w->response.rlen - w->response.data->len);
+ ssize_t bytes = read(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t)left);
+ if(likely(bytes > 0)) {
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Read %zd bytes.", w->id, bytes);
+ netdata_log_debug(D_WEB_DATA, "%llu: Read data: '%s'.", w->id, &w->response.data->buffer[old]);
+
+ web_client_enable_wait_send(w);
+
+ if(w->response.rlen && w->response.data->len >= w->response.rlen)
+ web_client_disable_wait_receive(w);
+ }
+ else if(likely(bytes == 0)) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Out of input file data.", w->id);
+
+ // if we cannot read, it means we have an error on input.
+        // if, however, we are copying a file from ifd to ofd, we should not return an error.
+ // in this case, the error should be generated when the file has been sent to the client.
+
+ // we are copying data from ifd to ofd
+ // let it finish copying...
+ web_client_disable_wait_receive(w);
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Read the whole file.", w->id);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != w->ofd) close(w->ifd);
+ }
+
+ w->ifd = w->ofd;
+ }
+ else {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: read data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_receive(struct web_client *w)
+{
+ if(unlikely(w->mode == HTTP_REQUEST_MODE_FILECOPY))
+ return web_client_read_file(w);
+
+ ssize_t bytes;
+ ssize_t left = (ssize_t)(w->response.data->size - w->response.data->len);
+
+ // do we have any space for more data?
+ buffer_need_bytes(w->response.data, NETDATA_WEB_REQUEST_INITIAL_SIZE);
+
+ errno = 0;
+
+#ifdef ENABLE_HTTPS
+ if ( (web_client_check_conn_tcp(w)) && (netdata_ssl_web_server_ctx) ) {
+ if (SSL_connection(&w->ssl)) {
+ bytes = netdata_ssl_read(&w->ssl, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1));
+ web_client_enable_wait_from_ssl(w);
+ }
+ else {
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ }
+ }
+ else if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w)) {
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ }
+ else // other connection methods
+ bytes = -1;
+#else
+ if(web_client_check_conn_tcp(w) || web_client_check_conn_unix(w))
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ else
+ bytes = -1;
+#endif
+
+ if(likely(bytes > 0)) {
+ w->statistics.received_bytes += bytes;
+
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Received %zd bytes.", w->id, bytes);
+ netdata_log_debug(D_WEB_DATA, "%llu: Received data: '%s'.", w->id, &w->response.data->buffer[old]);
+ }
+ else if(unlikely(bytes < 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR))) {
+ web_client_enable_wait_receive(w);
+ return 0;
+ }
+ else if (bytes < 0) {
+ netdata_log_debug(D_WEB_CLIENT, "%llu: receive data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ } else
+ netdata_log_debug(D_WEB_CLIENT, "%llu: Received %zd bytes.", w->id, bytes);
+
+ return(bytes);
+}
+
+void web_client_decode_path_and_query_string(struct web_client *w, const char *path_and_query_string) {
+ char buffer[NETDATA_WEB_REQUEST_URL_SIZE + 2];
+ buffer[0] = '\0';
+
+ buffer_flush(w->url_path_decoded);
+ buffer_flush(w->url_query_string_decoded);
+
+ if(buffer_strlen(w->url_as_received) == 0)
+ // do not overwrite this if it is already filled
+ buffer_strcat(w->url_as_received, path_and_query_string);
+
+ if(w->mode == HTTP_REQUEST_MODE_STREAM) {
+ // in stream mode, there is no path
+
+ url_decode_r(buffer, path_and_query_string, NETDATA_WEB_REQUEST_URL_SIZE + 1);
+
+ buffer[NETDATA_WEB_REQUEST_URL_SIZE + 1] = '\0';
+ buffer_strcat(w->url_query_string_decoded, buffer);
+ }
+ else {
+ // in non-stream mode, there is a path
+ // FIXME - the way this is implemented, query string params never accept the symbol &, not even encoded as %26
+ // To support the symbol & in query string params, we need to turn the url_query_string_decoded into a
+ // dictionary and decode each of the parameters individually.
+ // OR: in url_query_string_decoded use as separator a control character that cannot appear in the URL.
+
+ url_decode_r(buffer, path_and_query_string, NETDATA_WEB_REQUEST_URL_SIZE + 1);
+
+ char *question_mark_start = strchr(buffer, '?');
+ if (question_mark_start) {
+ buffer_strcat(w->url_query_string_decoded, question_mark_start);
+ char c = *question_mark_start;
+ *question_mark_start = '\0';
+ buffer_strcat(w->url_path_decoded, buffer);
+ *question_mark_start = c;
+ } else {
+ buffer_strcat(w->url_query_string_decoded, "");
+ buffer_strcat(w->url_path_decoded, buffer);
+ }
+ }
+}
+
+void web_client_reuse_from_cache(struct web_client *w) {
+ // zero everything about it - but keep the buffers
+
+ web_client_reset_allocations(w, false);
+
+ // remember the pointers to the buffers
+ BUFFER *b1 = w->response.data;
+ BUFFER *b2 = w->response.header;
+ BUFFER *b3 = w->response.header_output;
+ BUFFER *b4 = w->url_path_decoded;
+ BUFFER *b5 = w->url_as_received;
+ BUFFER *b6 = w->url_query_string_decoded;
+ BUFFER *b7 = w->payload;
+
+#ifdef ENABLE_HTTPS
+ NETDATA_SSL ssl = w->ssl;
+#endif
+
+ size_t use_count = w->use_count;
+ size_t *statistics_memory_accounting = w->statistics.memory_accounting;
+
+ // zero everything
+ memset(w, 0, sizeof(struct web_client));
+
+ w->ifd = w->ofd = -1;
+ w->statistics.memory_accounting = statistics_memory_accounting;
+ w->use_count = use_count;
+
+#ifdef ENABLE_HTTPS
+ w->ssl = ssl;
+#endif
+
+ // restore the pointers of the buffers
+ w->response.data = b1;
+ w->response.header = b2;
+ w->response.header_output = b3;
+ w->url_path_decoded = b4;
+ w->url_as_received = b5;
+ w->url_query_string_decoded = b6;
+ w->payload = b7;
+}
+
+struct web_client *web_client_create(size_t *statistics_memory_accounting) {
+ struct web_client *w = (struct web_client *)callocz(1, sizeof(struct web_client));
+
+#ifdef ENABLE_HTTPS
+ w->ssl = NETDATA_SSL_UNSET_CONNECTION;
+#endif
+
+ w->use_count = 1;
+ w->statistics.memory_accounting = statistics_memory_accounting;
+
+ w->url_as_received = buffer_create(NETDATA_WEB_DECODED_URL_INITIAL_SIZE, w->statistics.memory_accounting);
+ w->url_path_decoded = buffer_create(NETDATA_WEB_DECODED_URL_INITIAL_SIZE, w->statistics.memory_accounting);
+ w->url_query_string_decoded = buffer_create(NETDATA_WEB_DECODED_URL_INITIAL_SIZE, w->statistics.memory_accounting);
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE, w->statistics.memory_accounting);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_INITIAL_SIZE, w->statistics.memory_accounting);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_INITIAL_SIZE, w->statistics.memory_accounting);
+
+ __atomic_add_fetch(w->statistics.memory_accounting, sizeof(struct web_client), __ATOMIC_RELAXED);
+
+ return w;
+}
+
+void web_client_free(struct web_client *w) {
+#ifdef ENABLE_HTTPS
+ netdata_ssl_close(&w->ssl);
+#endif
+
+ web_client_reset_allocations(w, true);
+
+ __atomic_sub_fetch(w->statistics.memory_accounting, sizeof(struct web_client), __ATOMIC_RELAXED);
+ freez(w);
+}
+
+inline void web_client_timeout_checkpoint_init(struct web_client *w) {
+ now_monotonic_high_precision_timeval(&w->timings.tv_in);
+}
+
+inline void web_client_timeout_checkpoint_set(struct web_client *w, int timeout_ms) {
+ w->timings.timeout_ut = timeout_ms * USEC_PER_MS;
+
+ if(!w->timings.tv_in.tv_sec)
+ web_client_timeout_checkpoint_init(w);
+
+ if(!w->timings.tv_timeout_last_checkpoint.tv_sec)
+ w->timings.tv_timeout_last_checkpoint = w->timings.tv_in;
+}
+
+inline usec_t web_client_timeout_checkpoint(struct web_client *w) {
+ struct timeval now;
+ now_monotonic_high_precision_timeval(&now);
+
+ if (!w->timings.tv_timeout_last_checkpoint.tv_sec)
+ w->timings.tv_timeout_last_checkpoint = w->timings.tv_in;
+
+ usec_t since_last_check_ut = dt_usec(&w->timings.tv_timeout_last_checkpoint, &now);
+
+ w->timings.tv_timeout_last_checkpoint = now;
+
+ return since_last_check_ut;
+}
+
+inline usec_t web_client_timeout_checkpoint_response_ready(struct web_client *w, usec_t *usec_since_last_checkpoint) {
+ usec_t since_last_check_ut = web_client_timeout_checkpoint(w);
+ if(usec_since_last_checkpoint)
+ *usec_since_last_checkpoint = since_last_check_ut;
+
+ w->timings.tv_ready = w->timings.tv_timeout_last_checkpoint;
+
+ // return the total time of the query
+ return dt_usec(&w->timings.tv_in, &w->timings.tv_ready);
+}
+
+inline bool web_client_timeout_checkpoint_and_check(struct web_client *w, usec_t *usec_since_last_checkpoint) {
+
+ usec_t since_last_check_ut = web_client_timeout_checkpoint(w);
+ if(usec_since_last_checkpoint)
+ *usec_since_last_checkpoint = since_last_check_ut;
+
+ if(!w->timings.timeout_ut)
+ return false;
+
+ usec_t since_reception_ut = dt_usec(&w->timings.tv_in, &w->timings.tv_timeout_last_checkpoint);
+ if (since_reception_ut >= w->timings.timeout_ut) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Query timeout exceeded");
+ w->response.code = HTTP_RESP_GATEWAY_TIMEOUT;
+ return true;
+ }
+
+ return false;
+}
diff --git a/src/web/server/web_client.h b/src/web/server/web_client.h
new file mode 100644
index 000000000..650ddb3eb
--- /dev/null
+++ b/src/web/server/web_client.h
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_H
+#define NETDATA_WEB_CLIENT_H 1
+
+#include "libnetdata/libnetdata.h"
+
+struct web_client;
+
+extern int web_enable_gzip, web_gzip_level, web_gzip_strategy;
+
+#define HTTP_REQ_MAX_HEADER_FETCH_TRIES 100
+
+extern int respect_web_browser_do_not_track_policy;
+extern char *web_x_frame_options;
+
+typedef enum __attribute__((packed)) {
+ HTTP_VALIDATION_OK,
+ HTTP_VALIDATION_NOT_SUPPORTED,
+ HTTP_VALIDATION_TOO_MANY_READ_RETRIES,
+ HTTP_VALIDATION_EXCESS_REQUEST_DATA,
+ HTTP_VALIDATION_MALFORMED_URL,
+ HTTP_VALIDATION_INCOMPLETE,
+#ifdef ENABLE_HTTPS
+ HTTP_VALIDATION_REDIRECT
+#endif
+} HTTP_VALIDATION;
+
+typedef enum __attribute__((packed)) {
+ WEB_CLIENT_FLAG_DEAD = (1 << 0), // this client is dead
+
+ WEB_CLIENT_FLAG_KEEPALIVE = (1 << 1), // the web client will be re-used
+
+ // compression
+ WEB_CLIENT_ENCODING_GZIP = (1 << 2),
+ WEB_CLIENT_ENCODING_DEFLATE = (1 << 3),
+ WEB_CLIENT_CHUNKED_TRANSFER = (1 << 4), // chunked transfer (used with zlib compression)
+
+ WEB_CLIENT_FLAG_WAIT_RECEIVE = (1 << 5), // we are waiting more input data
+ WEB_CLIENT_FLAG_WAIT_SEND = (1 << 6), // we have data to send to the client
+ WEB_CLIENT_FLAG_SSL_WAIT_RECEIVE = (1 << 7), // we are waiting more input data from ssl connection
+ WEB_CLIENT_FLAG_SSL_WAIT_SEND = (1 << 8), // we have data to send to the client from ssl connection
+
+ // DNT
+ WEB_CLIENT_FLAG_DO_NOT_TRACK = (1 << 9), // we should not set cookies on this client
+ WEB_CLIENT_FLAG_TRACKING_REQUIRED = (1 << 10), // we need to send cookies
+
+ // connection type
+ WEB_CLIENT_FLAG_CONN_TCP = (1 << 11), // the client is using a TCP socket
+ WEB_CLIENT_FLAG_CONN_UNIX = (1 << 12), // the client is using a UNIX socket
+ WEB_CLIENT_FLAG_CONN_CLOUD = (1 << 13), // the client is using Netdata Cloud
+ WEB_CLIENT_FLAG_CONN_WEBRTC = (1 << 14), // the client is using WebRTC
+
+ // streaming
+ WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = (1 << 15), // don't close the socket when cleaning up
+
+ // dashboard version
+ WEB_CLIENT_FLAG_PATH_IS_V0 = (1 << 16), // v0 dashboard found on the path
+ WEB_CLIENT_FLAG_PATH_IS_V1 = (1 << 17), // v1 dashboard found on the path
+ WEB_CLIENT_FLAG_PATH_IS_V2 = (1 << 18), // v2 dashboard found on the path
+    WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH = (1 << 19), // the path has a trailing slash
+ WEB_CLIENT_FLAG_PATH_HAS_FILE_EXTENSION = (1 << 20), // the path ends with a filename extension
+
+ // authorization
+ WEB_CLIENT_FLAG_AUTH_CLOUD = (1 << 21),
+ WEB_CLIENT_FLAG_AUTH_BEARER = (1 << 22),
+ WEB_CLIENT_FLAG_AUTH_GOD = (1 << 23),
+
+ // transient settings
+ WEB_CLIENT_FLAG_PROGRESS_TRACKING = (1 << 24), // flag to avoid redoing progress work
+} WEB_CLIENT_FLAGS;
+
+#define WEB_CLIENT_FLAG_PATH_WITH_VERSION (WEB_CLIENT_FLAG_PATH_IS_V0|WEB_CLIENT_FLAG_PATH_IS_V1|WEB_CLIENT_FLAG_PATH_IS_V2)
+#define web_client_reset_path_flags(w) (w)->flags &= ~(WEB_CLIENT_FLAG_PATH_WITH_VERSION|WEB_CLIENT_FLAG_PATH_HAS_TRAILING_SLASH|WEB_CLIENT_FLAG_PATH_HAS_FILE_EXTENSION)
+
+#define web_client_flag_check(w, flag) ((w)->flags & (flag))
+#define web_client_flag_set(w, flag) (w)->flags |= (flag)
+#define web_client_flag_clear(w, flag) (w)->flags &= ~(flag)
+
+#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
+#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
+
+#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
+
+#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+
+#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+
+#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+
+#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
+
+#define web_client_has_ssl_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_SSL_WAIT_RECEIVE)
+#define web_client_enable_ssl_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_SSL_WAIT_RECEIVE)
+#define web_client_disable_ssl_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_SSL_WAIT_RECEIVE)
+
+#define web_client_has_ssl_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_SSL_WAIT_SEND)
+#define web_client_enable_ssl_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_SSL_WAIT_SEND)
+#define web_client_disable_ssl_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_SSL_WAIT_SEND)
+
+#define web_client_check_conn_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_UNIX)
+#define web_client_check_conn_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_TCP)
+#define web_client_check_conn_cloud(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_CLOUD)
+#define web_client_check_conn_webrtc(w) web_client_flag_check(w, WEB_CLIENT_FLAG_CONN_WEBRTC)
+
+#define WEB_CLIENT_FLAG_ALL_AUTHS (WEB_CLIENT_FLAG_AUTH_CLOUD | WEB_CLIENT_FLAG_AUTH_BEARER)
+#define web_client_flags_clear_conn(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_CONN_TCP | WEB_CLIENT_FLAG_CONN_UNIX | WEB_CLIENT_FLAG_CONN_CLOUD | WEB_CLIENT_FLAG_CONN_WEBRTC)
+#define web_client_flags_check_auth(w) web_client_flag_check(w, WEB_CLIENT_FLAG_ALL_AUTHS)
+#define web_client_flags_clear_auth(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_ALL_AUTHS)
+
+void web_client_reset_permissions(struct web_client *w);
+void web_client_set_permissions(struct web_client *w, HTTP_ACCESS access, HTTP_USER_ROLE role, WEB_CLIENT_FLAGS auth);
+
+void web_client_set_conn_tcp(struct web_client *w);
+void web_client_set_conn_unix(struct web_client *w);
+void web_client_set_conn_cloud(struct web_client *w);
+void web_client_set_conn_webrtc(struct web_client *w);
+
+#define NETDATA_WEB_REQUEST_URL_SIZE 65536 // static allocation
+
+#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384
+
+#define NETDATA_WEB_RESPONSE_HEADER_INITIAL_SIZE 4096
+#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 8192
+#define NETDATA_WEB_REQUEST_INITIAL_SIZE 8192
+#define NETDATA_WEB_REQUEST_MAX_SIZE 65536
+#define NETDATA_WEB_DECODED_URL_INITIAL_SIZE 512
+
+#define CLOUD_USER_NAME_LENGTH 64
+
+struct response {
+ BUFFER *header; // our response header
+ BUFFER *header_output; // internal use
+ BUFFER *data; // our response data buffer
+
+ short int code; // the HTTP response code
+ bool has_cookies;
+
+    size_t rlen; // if non-zero, the expected size of ifd (input of filecopy)
+ size_t sent; // current data length sent to output
+
+ bool zoutput; // if set to 1, web_client_send() will send compressed data
+
+ bool zinitialized;
+ z_stream zstream; // zlib stream for sending compressed output to client
+ size_t zsent; // the compressed bytes we have sent to the client
+ size_t zhave; // the compressed bytes that we have received from zlib
+ Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
+};
+
+struct web_client;
+typedef bool (*web_client_interrupt_t)(struct web_client *, void *data);
+
+struct web_client {
+ unsigned long long id;
+ size_t use_count;
+
+ nd_uuid_t transaction;
+
+ WEB_CLIENT_FLAGS flags; // status flags for the client
+ HTTP_REQUEST_MODE mode; // the operational mode of the client
+ HTTP_ACL acl; // the access list of the client
+ HTTP_ACL port_acl; // the operations permitted on the port the client connected to
+ HTTP_ACCESS access; // the access permissions of the client
+ HTTP_USER_ROLE user_role; // the user role of the client
+ size_t header_parse_tries;
+ size_t header_parse_last_size;
+
+ bool tcp_cork;
+ int ifd;
+ int ofd;
+
+ char client_ip[INET6_ADDRSTRLEN]; // Defined buffer sizes include null-terminators
+ char client_port[NI_MAXSERV];
+ char client_host[NI_MAXHOST];
+
+ BUFFER *url_as_received; // the entire URL as received, used for logging - DO NOT MODIFY
+ BUFFER *url_path_decoded; // the path, decoded - it is incrementally parsed and altered
+ BUFFER *url_query_string_decoded; // the query string, decoded - it is incrementally parsed and altered
+
+ // THESE NEED TO BE FREED
+ char *auth_bearer_token; // the Bearer auth token (if sent)
+ char *server_host; // the Host: header
+ char *forwarded_host; // the X-Forwarded-Host: header
+ char *forwarded_for; // the X-Forwarded-For: header
+ char *origin; // the Origin: header
+ char *user_agent; // the User-Agent: header
+
+ BUFFER *payload; // when this request is a POST, this has the payload
+
+ // STATIC-THREADED WEB SERVER MEMBERS
+ size_t pollinfo_slot; // POLLINFO slot of the web client
+ size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
+
+#ifdef ENABLE_HTTPS
+ NETDATA_SSL ssl;
+#endif
+
+ struct {
+ nd_uuid_t bearer_token;
+ nd_uuid_t cloud_account_id;
+ char client_name[CLOUD_USER_NAME_LENGTH];
+ } auth;
+
+ struct { // A callback to check if the query should be interrupted / stopped
+ web_client_interrupt_t callback;
+ void *callback_data;
+ } interrupt;
+
+ struct {
+ size_t received_bytes;
+ size_t sent_bytes;
+ size_t *memory_accounting; // temporary pointer for constructor to use
+ } statistics;
+
+ struct {
+ usec_t timeout_ut; // timeout if set, or zero
+ struct timeval tv_in; // request received
+ struct timeval tv_ready; // request processed - response ready
+ struct timeval tv_timeout_last_checkpoint; // last checkpoint
+ } timings;
+
+ struct {
+ struct web_client *prev;
+ struct web_client *next;
+ } cache;
+
+ struct response response;
+};
+
+int web_client_permission_denied(struct web_client *w);
+int web_client_permission_denied_acl(struct web_client *w);
+
+int web_client_service_unavailable(struct web_client *w);
+
+ssize_t web_client_send(struct web_client *w);
+ssize_t web_client_receive(struct web_client *w);
+ssize_t web_client_read_file(struct web_client *w);
+
+void web_client_process_request_from_web_server(struct web_client *w);
+void web_client_request_done(struct web_client *w);
+
+void web_client_build_http_header(struct web_client *w);
+
+void web_client_reuse_from_cache(struct web_client *w);
+struct web_client *web_client_create(size_t *statistics_memory_accounting);
+void web_client_free(struct web_client *w);
+
+#include "web/api/web_api_v1.h"
+#include "web/api/web_api_v2.h"
+#include "daemon/common.h"
+
+void web_client_decode_path_and_query_string(struct web_client *w, const char *path_and_query_string);
+int web_client_api_request(RRDHOST *host, struct web_client *w, char *url_path_fragment);
+int web_client_api_request_with_node_selection(RRDHOST *host, struct web_client *w, char *decoded_url_path);
+
+void web_client_timeout_checkpoint_init(struct web_client *w);
+void web_client_timeout_checkpoint_set(struct web_client *w, int timeout_ms);
+usec_t web_client_timeout_checkpoint(struct web_client *w);
+bool web_client_timeout_checkpoint_and_check(struct web_client *w, usec_t *usec_since_last_checkpoint);
+usec_t web_client_timeout_checkpoint_response_ready(struct web_client *w, usec_t *usec_since_last_checkpoint);
+void web_client_log_completed_request(struct web_client *w, bool update_web_stats);
+
+HTTP_VALIDATION http_request_validate(struct web_client *w);
+
+#endif
diff --git a/src/web/server/web_client_cache.c b/src/web/server/web_client_cache.c
new file mode 100644
index 000000000..654577e8a
--- /dev/null
+++ b/src/web/server/web_client_cache.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_client_cache.h"
+
+// ----------------------------------------------------------------------------
+// allocate and free web_clients
+
+// ----------------------------------------------------------------------------
+// web clients caching
+
+// When clients connect and disconnect, avoid allocating and releasing memory.
+// Instead, when new clients get connected, reuse any memory previously allocated
+// for serving web clients that are now disconnected.
+
+// The size of the cache is adaptive. It caches the structures of 2x
+// the number of currently connected clients.
+
+static struct clients_cache {
+ struct {
+ SPINLOCK spinlock;
+ struct web_client *head; // the structures of the currently connected clients
+        size_t count; // the number of currently connected clients
+
+ size_t allocated; // the number of allocations
+ size_t reused; // the number of re-uses
+ } used;
+
+ struct {
+ SPINLOCK spinlock;
+ struct web_client *head; // the cached structures, available for future clients
+ size_t count; // the number of cached structures
+ } avail;
+} web_clients_cache = {
+ .used = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .head = NULL,
+ .count = 0,
+ .reused = 0,
+ .allocated = 0,
+ },
+ .avail = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .head = NULL,
+ .count = 0,
+ },
+};
+
+// destroy the cache and free all the memory it uses
+void web_client_cache_destroy(void) {
+ internal_error(true, "web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , web_clients_cache.used.count
+ , web_clients_cache.avail.count
+ , web_clients_cache.used.allocated
+ , web_clients_cache.used.reused
+ , (web_clients_cache.used.allocated + web_clients_cache.used.reused)?(web_clients_cache.used.reused * 100 / (web_clients_cache.used.allocated + web_clients_cache.used.reused)):0
+ );
+
+ struct web_client *w, *t;
+
+ spinlock_lock(&web_clients_cache.avail.spinlock);
+ w = web_clients_cache.avail.head;
+ while(w) {
+ t = w;
+ w = w->cache.next;
+ web_client_free(t);
+ }
+ web_clients_cache.avail.head = NULL;
+ web_clients_cache.avail.count = 0;
+ spinlock_unlock(&web_clients_cache.avail.spinlock);
+
+// DO NOT FREE THEM IF THEY ARE USED
+// spinlock_lock(&web_clients_cache.used.spinlock);
+// w = web_clients_cache.used.head;
+// while(w) {
+// t = w;
+// w = w->next;
+// web_client_free(t);
+// }
+// web_clients_cache.used.head = NULL;
+// web_clients_cache.used.count = 0;
+// web_clients_cache.used.reused = 0;
+// web_clients_cache.used.allocated = 0;
+// spinlock_unlock(&web_clients_cache.used.spinlock);
+}
+
+struct web_client *web_client_get_from_cache(void) {
+ spinlock_lock(&web_clients_cache.avail.spinlock);
+ struct web_client *w = web_clients_cache.avail.head;
+ if(w) {
+ // get it from avail
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(web_clients_cache.avail.head, w, cache.prev, cache.next);
+ web_clients_cache.avail.count--;
+
+ spinlock_unlock(&web_clients_cache.avail.spinlock);
+ web_client_reuse_from_cache(w);
+ spinlock_lock(&web_clients_cache.used.spinlock);
+
+ web_clients_cache.used.reused++;
+ }
+ else {
+ spinlock_unlock(&web_clients_cache.avail.spinlock);
+ w = web_client_create(&netdata_buffers_statistics.buffers_web);
+ spinlock_lock(&web_clients_cache.used.spinlock);
+
+ w->id = global_statistics_web_client_connected();
+ web_clients_cache.used.allocated++;
+ }
+
+ // link it to used web clients
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(web_clients_cache.used.head, w, cache.prev, cache.next);
+ web_clients_cache.used.count++;
+ spinlock_unlock(&web_clients_cache.used.spinlock);
+
+ // initialize it
+ w->use_count++;
+ w->port_acl = HTTP_ACL_NONE;
+ w->acl = HTTP_ACL_NONE;
+ w->mode = HTTP_REQUEST_MODE_GET;
+ web_client_reset_permissions(w);
+ memset(w->transaction, 0, sizeof(w->transaction));
+
+ return w;
+}
+
+void web_client_release_to_cache(struct web_client *w) {
+
+#ifdef ENABLE_HTTPS
+ netdata_ssl_close(&w->ssl);
+#endif
+
+ // unlink it from the used
+ spinlock_lock(&web_clients_cache.used.spinlock);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(web_clients_cache.used.head, w, cache.prev, cache.next);
+ ssize_t used_count = (ssize_t)--web_clients_cache.used.count;
+ spinlock_unlock(&web_clients_cache.used.spinlock);
+
+ spinlock_lock(&web_clients_cache.avail.spinlock);
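+    // eviction policy: free the structure instead of caching it when it has been
+    // reused more than 100 times, when the available pool already holds at least
+    // twice the number of clients currently in use, or when 10 or fewer clients
+    // are in use and the pool already holds 20 or more entries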
+ if(w->use_count > 100 || (used_count > 0 && web_clients_cache.avail.count >= 2 * (size_t)used_count) || (used_count <= 10 && web_clients_cache.avail.count >= 20)) {
+ spinlock_unlock(&web_clients_cache.avail.spinlock);
+
+ // we have too many of them - free it
+ web_client_free(w);
+ }
+ else {
+ // link it to the avail
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(web_clients_cache.avail.head, w, cache.prev, cache.next);
+ web_clients_cache.avail.count++;
+ spinlock_unlock(&web_clients_cache.avail.spinlock);
+ }
+}
diff --git a/src/web/server/web_client_cache.h b/src/web/server/web_client_cache.h
new file mode 100644
index 000000000..85cde3e83
--- /dev/null
+++ b/src/web/server/web_client_cache.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_CACHE_H
+#define NETDATA_WEB_CLIENT_CACHE_H
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+void web_client_release_to_cache(struct web_client *w);
+struct web_client *web_client_get_from_cache(void);
+void web_client_cache_destroy(void);
+
+#include "web_server.h"
+
+#endif //NETDATA_WEB_CLIENT_CACHE_H
diff --git a/src/web/server/web_server.c b/src/web/server/web_server.c
new file mode 100644
index 000000000..3497af13c
--- /dev/null
+++ b/src/web/server/web_server.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_server.h"
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+// --------------------------------------------------------------------------------------
+
+WEB_SERVER_MODE web_server_mode_id(const char *mode) {
+ if(!strcmp(mode, "none"))
+ return WEB_SERVER_MODE_NONE;
+ else
+ return WEB_SERVER_MODE_STATIC_THREADED;
+
+}
+
+const char *web_server_mode_name(WEB_SERVER_MODE id) {
+ switch(id) {
+ case WEB_SERVER_MODE_NONE:
+ return "none";
+ default:
+ case WEB_SERVER_MODE_STATIC_THREADED:
+ return "static-threaded";
+ }
+}
+
+// --------------------------------------------------------------------------------------
+// API sockets
+
+LISTEN_SOCKETS api_sockets = {
+ .config = &netdata_config,
+ .config_section = CONFIG_SECTION_WEB,
+ .default_bind_to = "*",
+ .default_port = API_LISTEN_PORT,
+ .backlog = API_LISTEN_BACKLOG
+};
+
+void debug_sockets() {
+ BUFFER *wb = buffer_create(256 * sizeof(char), NULL);
+ int i;
+
+ for(i = 0 ; i < (int)api_sockets.opened ; i++) {
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_NOCHECK) ? "NONE " : "");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_DASHBOARD) ? "dashboard " : "");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_REGISTRY) ? "registry " : "");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_BADGES) ? "badges " : "");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_MANAGEMENT) ? "management " : "");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_STREAMING) ? "streaming " : "");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & HTTP_ACL_NETDATACONF) ? "netdata.conf " : "");
+ netdata_log_debug(D_WEB_CLIENT, "Socket fd %d name '%s' acl_flags: %s",
+ i,
+ api_sockets.fds_names[i],
+ buffer_tostring(wb));
+ buffer_reset(wb);
+ }
+ buffer_free(wb);
+}
+
+bool api_listen_sockets_setup(void) {
+ int socks = listen_sockets_setup(&api_sockets);
+
+ if(!socks)
+ return false;
+
+ if(unlikely(debug_flags & D_WEB_CLIENT))
+ debug_sockets();
+
+ return true;
+}
+
+
+// --------------------------------------------------------------------------------------
+// access lists
+
+SIMPLE_PATTERN *web_allow_connections_from = NULL;
+int web_allow_connections_dns;
+
+// WEB_CLIENT_ACL
+SIMPLE_PATTERN *web_allow_dashboard_from = NULL;
+int web_allow_dashboard_dns;
+SIMPLE_PATTERN *web_allow_registry_from = NULL;
+int web_allow_registry_dns;
+SIMPLE_PATTERN *web_allow_badges_from = NULL;
+int web_allow_badges_dns;
+SIMPLE_PATTERN *web_allow_mgmt_from = NULL;
+int web_allow_mgmt_dns;
+SIMPLE_PATTERN *web_allow_streaming_from = NULL;
+int web_allow_streaming_dns;
+SIMPLE_PATTERN *web_allow_netdataconf_from = NULL;
+int web_allow_netdataconf_dns;
+
+void web_client_update_acl_matches(struct web_client *w) {
+ w->acl = HTTP_ACL_TRANSPORTS;
+
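+    // for ports that require client IP validation, each ACL category is granted
+    // when no allow pattern is configured for it, or when the client's IP (or
+    // resolved hostname) matches the pattern; the result is then intersected
+    // with the ACL of the listening port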
+ if(!(w->port_acl & HTTP_ACL_TRANSPORTS_WITHOUT_CLIENT_IP_VALIDATION)) {
+ if (!web_allow_dashboard_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_dashboard_from, "dashboard", web_allow_dashboard_dns))
+ w->acl |= HTTP_ACL_DASHBOARD;
+
+ if (!web_allow_registry_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_registry_from, "registry", web_allow_registry_dns))
+ w->acl |= HTTP_ACL_REGISTRY;
+
+ if (!web_allow_badges_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_badges_from, "badges", web_allow_badges_dns))
+ w->acl |= HTTP_ACL_BADGES;
+
+ if (!web_allow_mgmt_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_mgmt_from, "management", web_allow_mgmt_dns))
+ w->acl |= HTTP_ACL_MANAGEMENT;
+
+ if (!web_allow_streaming_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_streaming_from, "streaming", web_allow_streaming_dns))
+ w->acl |= HTTP_ACL_STREAMING;
+
+ if (!web_allow_netdataconf_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_netdataconf_from, "netdata.conf", web_allow_netdataconf_dns))
+ w->acl |= HTTP_ACL_NETDATACONF;
+ }
+
+ w->acl &= w->port_acl;
+}
+
+
+// --------------------------------------------------------------------------------------
+
+void web_server_log_connection(struct web_client *w, const char *msg) {
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_U64(NDF_CONNECTION_ID, w->id),
+#ifdef ENABLE_HTTPS
+ ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, SSL_connection(&w->ssl) ? "https" : "http"),
+#else
+ ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "http"),
+#endif
+ ND_LOG_FIELD_TXT(NDF_SRC_IP, w->client_ip),
+ ND_LOG_FIELD_TXT(NDF_SRC_PORT, w->client_port),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_HOST, w->forwarded_host),
+ ND_LOG_FIELD_TXT(NDF_SRC_FORWARDED_FOR, w->forwarded_for),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ nd_log(NDLS_ACCESS, NDLP_DEBUG, "[%s]:%s %s", w->client_ip, w->client_port, msg);
+}
diff --git a/src/web/server/web_server.h b/src/web/server/web_server.h
new file mode 100644
index 000000000..a31938c8d
--- /dev/null
+++ b/src/web/server/web_server.h
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_H
+#define NETDATA_WEB_SERVER_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+#ifndef API_LISTEN_PORT
+#define API_LISTEN_PORT 19999
+#endif
+
+#ifndef API_LISTEN_BACKLOG
+#define API_LISTEN_BACKLOG 4096
+#endif
+
+typedef enum web_server_mode {
+ WEB_SERVER_MODE_STATIC_THREADED,
+ WEB_SERVER_MODE_NONE
+} WEB_SERVER_MODE;
+
+extern SIMPLE_PATTERN *web_allow_connections_from;
+extern int web_allow_connections_dns;
+extern SIMPLE_PATTERN *web_allow_dashboard_from;
+extern int web_allow_dashboard_dns;
+extern SIMPLE_PATTERN *web_allow_registry_from;
+extern int web_allow_registry_dns;
+extern SIMPLE_PATTERN *web_allow_badges_from;
+extern int web_allow_badges_dns;
+extern SIMPLE_PATTERN *web_allow_streaming_from;
+extern int web_allow_streaming_dns;
+extern SIMPLE_PATTERN *web_allow_netdataconf_from;
+extern int web_allow_netdataconf_dns;
+extern SIMPLE_PATTERN *web_allow_mgmt_from;
+extern int web_allow_mgmt_dns;
+
+extern WEB_SERVER_MODE web_server_mode;
+
+WEB_SERVER_MODE web_server_mode_id(const char *mode);
+const char *web_server_mode_name(WEB_SERVER_MODE id);
+
+bool api_listen_sockets_setup(void);
+
+#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60
+#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60
+extern int web_client_timeout;
+extern int web_client_first_request_timeout;
+extern long web_client_streaming_rate_t;
+
+#ifdef WEB_SERVER_INTERNALS
+extern LISTEN_SOCKETS api_sockets;
+void web_client_update_acl_matches(struct web_client *w);
+void web_server_log_connection(struct web_client *w, const char *msg);
+struct web_client *web_client_create_on_listenfd(int listener);
+
+#include "web_client_cache.h"
+#endif // WEB_SERVER_INTERNALS
+
+#include "static/static-threaded.h"
+
+#include "daemon/common.h"
+
+#endif /* NETDATA_WEB_SERVER_H */