author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 11:08:07 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 11:08:07 +0000
commit     c69cb8cc094cc916adbc516b09e944cd3d137c01 (patch)
tree       f2878ec41fb6d0e3613906c6722fc02b934eeb80 /web/server
parent     Initial commit. (diff)

Adding upstream version 1.29.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'web/server')
-rw-r--r--   web/server/Makefile.am                  12
-rw-r--r--   web/server/README.md                   245
-rw-r--r--   web/server/static/Makefile.am           11
-rw-r--r--   web/server/static/README.md             17
-rw-r--r--   web/server/static/static-threaded.c    503
-rw-r--r--   web/server/static/static-threaded.h     10
-rw-r--r--   web/server/web_client.c               2035
-rw-r--r--   web/server/web_client.h                217
-rw-r--r--   web/server/web_client_cache.c          269
-rw-r--r--   web/server/web_client_cache.h           31
-rw-r--r--   web/server/web_server.c                159
-rw-r--r--   web/server/web_server.h                 64
12 files changed, 3573 insertions, 0 deletions
diff --git a/web/server/Makefile.am b/web/server/Makefile.am
new file mode 100644
index 0000000..1ee01bb
--- /dev/null
+++ b/web/server/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ static \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/README.md b/web/server/README.md
new file mode 100644
index 0000000..fbf3151
--- /dev/null
+++ b/web/server/README.md
@@ -0,0 +1,245 @@
+<!--
+title: "Web server"
+description: "The Netdata Agent's local static-threaded web server serves dashboards and real-time visualizations with security and DDoS protection."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/server/README.md
+-->
+
+# Web server
+
+The Netdata web server runs as `static-threaded`, i.e. with a fixed, configurable number of threads.
+It uses non-blocking I/O and respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
+
+## Configuration
+
+Disable the web server by editing `netdata.conf` and setting:
+
+```
+[web]
+ mode = none
+```
+
+With the web server enabled, control the number of threads and sockets with the following settings:
+
+```
+[web]
+ web server threads = 4
+ web server max sockets = 512
+```
+
+The default number of processor threads is `min(cpu cores, 6)`.
+
+The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection.
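+
+If Netdata is managed by systemd, you can raise that open-files limit with a drop-in unit. This is only an illustrative sketch; the drop-in path and the value are examples, not Netdata defaults:
+
+```
+# /etc/systemd/system/netdata.service.d/limits.conf (example drop-in)
+[Service]
+LimitNOFILE=30000
+```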
+
+### Binding Netdata to multiple ports
+
+Netdata can bind to multiple IPs and ports, offering access to different services on each. Up to 100 sockets can be used (increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`).
+
+The ports to bind are controlled via `[web].bind to`, like this:
+
+```
+[web]
+ default port = 19999
+ bind to = 127.0.0.1=dashboard^SSL=optional 10.1.1.1:19998=management|netdata.conf hostname:19997=badges [::]:19996=streaming^SSL=force localhost:19995=registry *:http=dashboard unix:/run/netdata/netdata.sock
+```
+
+Using the above, Netdata will bind to:
+
+- IPv4 127.0.0.1 at port 19999 (the port is taken from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. Both HTTP and HTTPS requests will be accepted.
+- IPv4 10.1.1.1 at port 19998. The management API and `netdata.conf` will be accessible on this port.
+- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port.
+- All IPv6 IPs at port 19996. Only metric streaming requests from other Netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. child nodes also need to be [configured for TLS](/streaming/README.md)).
+- All the IPs `localhost` resolves to (both IPv4 and IPv6, depending on the resolved IPs) at port 19995. This port will only accept registry API requests.
+- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port.
+- Unix domain socket `/run/netdata/netdata.sock`. All requests are serviceable on this socket. Note that in some OSs like Fedora, every service sees a different `/tmp`, so don't create a Unix socket under `/tmp`. `/run` or `/var/run` is suggested.
+
+The option `[web].default port` is used for entries in `[web].bind to` that do not specify a port.
+
+Note that the access permissions specified with the `=request type|request type|...` format are available from version 1.12 onwards.
+As shown in the example above, these permissions are optional, with the default being to permit all request types on the specified port.
+The request types are strings identical to the `allow X from` directives of the access lists, i.e. `dashboard`, `streaming`, `registry`, `netdata.conf`, `badges` and `management`.
+The access lists themselves and the general setting `allow connections from` in the next section are applied regardless of the ports that are configured to provide these services.
+The API requests are serviced as follows:
+
+- `dashboard` gives access to the UI, the read API and badges API calls.
+- `badges` gives access only to the badges API calls.
+- `management` gives access only to the management API calls.
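+
+For example, to expose only the dashboard and badges on one port and only the management API on another (the addresses and ports below are illustrative):
+
+```
+[web]
+    bind to = 127.0.0.1:19999=dashboard|badges 127.0.0.1:19998=management
+```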
+
+### Enabling TLS support
+
+Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data to a
+parent from its child nodes, via the TLS protocol.
+
+Inbound unix socket connections are unaffected, regardless of the TLS settings.
+
+> While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol,
+> it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata
+> itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS`
+> or `TLS/SSL`.
+
+To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`:
+
+```conf
+[web]
+ ssl key = /etc/netdata/ssl/key.pem
+ ssl certificate = /etc/netdata/ssl/cert.pem
+```
+
+Both files must be readable by the `netdata` user. If either of these files does not exist or is unreadable, Netdata will fall back to HTTP. For a parent-child connection, only the parent needs these settings.
+
+For test purposes, generate self-signed certificates with the following command:
+
+```bash
+openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem
+```
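+
+After generating (or obtaining) the files, place them where the configuration above points and make sure the `netdata` user can read them. A minimal sketch, assuming the `/etc/netdata/ssl` paths from the example above and that Netdata runs as the `netdata` user:
+
+```bash
+sudo mkdir -p /etc/netdata/ssl
+sudo mv key.pem cert.pem /etc/netdata/ssl/
+sudo chown netdata:netdata /etc/netdata/ssl/key.pem /etc/netdata/ssl/cert.pem
+sudo chmod 400 /etc/netdata/ssl/key.pem
+sudo chmod 444 /etc/netdata/ssl/cert.pem
+```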
+
+> If you use a 4096-bit key and certificate, Netdata will need more CPU to process the communication.
+> `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. Verify the difference
+> by running:
+>
+> ```sh
+> openssl speed rsa2048 rsa4096
+> ```
+
+### Select TLS version
+
+Beginning with version 1.21, you can specify the TLS version and the ciphers you want to use:
+
+```conf
+[web]
+ tls version = 1.3
+ tls ciphers = TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256
+```
+
+If you do not specify these options, Netdata will use the highest available protocol version on your system and the default cipher list for that protocol provided by your TLS implementation.
+
+While Netdata accepts any TLS version as an argument (`1` or `1.0`, `1.1`, `1.2` and `1.3`), we recommend `1.3` for the most secure encryption.
+
+#### TLS/SSL enforcement
+
+When certificates are defined and no other options are provided, a Netdata server will:
+
+- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, `netdata.conf` and badges.
+- Allow incoming child connections to use both unencrypted and encrypted communications for streaming.
+
+To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming.
+
+| SSL setting | HTTP requests | HTTPS requests | Unencrypted Streams | Encrypted Streams |
+|:-----------:|:-------------:|:--------------:|:-------------------:|:-----------------:|
+| none | Redirected to HTTPS|Accepted|Accepted|Accepted|
+| `force`| Redirected to HTTPS|Accepted|Denied|Accepted|
+| `optional`| Accepted|Accepted|Accepted|Accepted|
+
+Example:
+
+```
+[web]
+ bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force
+```
+
+For information on how to configure the child to use TLS, check [securing the communication](/streaming/README.md#securing-streaming-communications) in the streaming documentation. There you will find additional details on the expected behavior of client and server nodes when their respective TLS options are enabled.
+
+When SSL is configured differently for different ports, Netdata applies the behavior specified for each port. For example, with the configuration line below:
+
+```
+[web]
+ bind to = *=dashboard|registry|badges|management|streaming|netdata.conf^SSL=force *:20000=netdata.conf^SSL=optional *:20001=dashboard|registry
+```
+
+Netdata will:
+
+- Force all HTTP requests to the default port to be redirected to HTTPS (same port).
+- Refuse unencrypted streaming connections from child nodes on the default port.
+- Allow both HTTP and HTTPS requests to port 20000 for `netdata.conf`.
+- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
+
+#### TLS/SSL errors
+
+When you start using Netdata with TLS, you may find errors in the Netdata log, which is stored at `/var/log/netdata/error.log` by default.
+
+Most of the time, these errors are due to incompatibilities between your browser's options related to TLS/SSL protocols and Netdata's internal configuration. The most common error is `error:00000006:lib(0):func(0):EVP lib`.
+
+In the near future, Netdata will allow users to change this internal configuration to avoid such errors. Until then, we recommend using only the most common and safe encryption protocols listed above.
+
+### Access lists
+
+Netdata supports access lists in `netdata.conf`:
+
+```
+[web]
+ allow connections from = localhost *
+ allow dashboard from = localhost *
+ allow badges from = *
+ allow streaming from = *
+ allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*
+ allow management from = localhost
+```
+
+The lists are space-separated patterns that are matched against the IPs or FQDNs of the clients; `*` matches anything.
+
+- `allow connections from` matches anyone that connects on the Netdata port(s).
+  So, if someone is not allowed, they will be connected and disconnected immediately, without Netdata reading even
+  a single byte from the connection. This is a global setting with higher priority than any of the ones below.
+
+- `allow dashboard from` applies when the request is for a static dashboard file or one of the API calls the
+  dashboards make.
+
+- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`.
+
+- `allow streaming from` checks whether a child node attempting to stream metrics to this Netdata agent is allowed.
+ This can be controlled per API KEY and MACHINE GUID in `stream.conf`.
+ The setting in `netdata.conf` is checked before the ones in `stream.conf`.
+
+- `allow netdata.conf from` checks the client IP before serving `http://netdata.host:19999/netdata.conf`.
+  The default list covers the private IPv4 ranges and the IPv6 unique local (`fd*`) addresses. Keep in mind that connections to Netdata API ports are filtered by `allow connections from`, so IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from` (see the example after this list).
+
+- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported only for [health](/web/api/health/README.md#health-management-api).
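+
+For example, to make `netdata.conf` reachable only from localhost and a private `10.*` range, both lists must allow those addresses (the addresses below are illustrative):
+
+```
+[web]
+    allow connections from = localhost 10.*
+    allow netdata.conf from = localhost 10.*
+```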
+
+To check the FQDN of a connection without exposing the Netdata agent to DNS spoofing, a reverse DNS record
+must be set up for the connecting host. At connection time the reverse DNS of the peer IP address is resolved, and
+a forward DNS resolution is then made to validate the resolved name against the name pattern.
+
+Please note that this process can be expensive on a machine that is serving many connections. Each access list has an
+associated configuration option to turn off DNS-based patterns completely to avoid incurring this cost at run-time:
+
+```
+ allow connections by dns = heuristic
+ allow dashboard by dns = heuristic
+ allow badges by dns = heuristic
+ allow streaming by dns = heuristic
+ allow netdata.conf by dns = no
+ allow management by dns = heuristic
+```
+
+The three possible values for each of these options are `yes`, `no` and `heuristic`. The `heuristic` option disables
+the check when the pattern only contains IPv4/IPv6 addresses or `localhost`, and enables it when wildcards are
+present that may match DNS FQDNs.
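+
+For example, the hypothetical pattern below contains a DNS wildcard, so `heuristic` keeps the reverse/forward DNS validation enabled for it:
+
+```
+[web]
+    allow connections from = localhost 10.* *.example.com
+    allow connections by dns = heuristic
+```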
+
+### Other netdata.conf [web] section options
+
+|setting|default|info|
+|:-----:|:-----:|:---|
+|ses max window|`15`|See [single exponential smoothing](/web/api/queries/ses/README.md)|
+|des max window|`15`|See [double exponential smoothing](/web/api/queries/des/README.md)|
+|listen backlog|`4096`|The port backlog. Check `man 2 listen`.|
+|web files owner|`netdata`|The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the configured user is not found, Netdata will only serve files owned by the user given in `run as user`.|
+|web files group|`netdata`|If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not.|
+|disconnect idle clients after seconds|`60`|The time in seconds to disconnect web clients after being totally idle.|
+|timeout for first request|`60`|How long to wait for a client to send a request before closing the socket. Prevents slow request attacks.|
+|accept a streaming request every seconds|`0`|Can be used to set a limit on how often a parent node will accept streaming requests from child nodes in a [streaming and replication setup](/streaming/README.md)|
+|respect do not track policy|`no`|If set to `yes`, will respect the client's browser preferences on storing cookies.|
+|x-frame-options response header||[Avoid clickjacking attacks, by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).|
+|enable gzip compression|`yes`|When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses.|
+|gzip compression strategy|`default`|Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed`|
+|gzip compression level|`3`|Valid levels are 1 (fastest) to 9 (best ratio)|
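+
+For example, a typical combination of these options could look like this (the values are illustrative; `SAMEORIGIN` is a standard `X-Frame-Options` value):
+
+```
+[web]
+    enable gzip compression = yes
+    gzip compression level = 3
+    x-frame-options response header = SAMEORIGIN
+    disconnect idle clients after seconds = 60
+```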
+
+## DDoS protection
+
+If you publish your Netdata to the internet, you may want to apply some protection against DDoS:
+
+1. Use the `static-threaded` web server (it is the default)
+2. Use a reasonable `[web].web server max sockets` (the default is derived from the number of open files Netdata is allowed to use, as described above)
+3. Don't use all your CPU cores for Netdata (lower `[web].web server threads`)
+4. Run the `netdata` process with a low process scheduling priority (the default is the lowest)
+5. If possible, proxy Netdata via a full-featured web server (nginx, apache, etc.), for example with the hypothetical nginx sketch below
+
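+A minimal, hypothetical nginx reverse-proxy sketch for a local agent could look like this (the server name is an example; TLS termination and further hardening are left out):
+
+```
+server {
+    listen 80;
+    server_name netdata.example.com;
+
+    location / {
+        proxy_pass http://127.0.0.1:19999;
+        proxy_set_header Host $host;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header X-Forwarded-Host $host;
+    }
+}
+```
+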
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/server/static/Makefile.am b/web/server/static/Makefile.am
new file mode 100644
index 0000000..59250a9
--- /dev/null
+++ b/web/server/static/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/static/README.md b/web/server/static/README.md
new file mode 100644
index 0000000..e095f2e
--- /dev/null
+++ b/web/server/static/README.md
@@ -0,0 +1,17 @@
+<!--
+title: "`static-threaded` web server"
+description: "The Netdata Agent's static-threaded web server spawns a fixed number of threads that listen to web requests and uses non-blocking I/O."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/server/static/README.md
+-->
+
+# `static-threaded` web server
+
+The `static-threaded` web server spawns a fixed number of threads.
+All the threads are concurrently listening for web requests on the same sockets.
+The kernel distributes the incoming requests to them.
+
+Each thread uses non-blocking I/O so it can serve any number of web requests in parallel.
+
+This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
+
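+A rough, self-contained C sketch of the idea (not the actual Netdata implementation) is shown below: every worker thread polls the same non-blocking listening socket and accepts whatever connections the kernel hands it.
+
+```c
+// Minimal illustration only: N threads share one non-blocking listening socket.
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static void *worker(void *arg) {
+    int listen_fd = *(int *)arg;
+    for (;;) {
+        struct pollfd pfd = { .fd = listen_fd, .events = POLLIN };
+        if (poll(&pfd, 1, 1000) <= 0) continue;          // wait for activity
+        int client = accept(listen_fd, NULL, NULL);      // another thread may win the race
+        if (client < 0) continue;                        // EAGAIN: nothing left to accept
+        fcntl(client, F_SETFL, O_NONBLOCK);              // serve it with non-blocking I/O
+        close(client);                                   // request handling omitted
+    }
+    return NULL;
+}
+
+int main(void) {
+    int fd = socket(AF_INET, SOCK_STREAM, 0);
+    struct sockaddr_in addr = { .sin_family = AF_INET, .sin_port = htons(19999) };
+    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+    bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+    listen(fd, 128);
+    fcntl(fd, F_SETFL, O_NONBLOCK);                      // the listening socket is non-blocking too
+
+    pthread_t threads[4];                                // fixed number of worker threads
+    for (int i = 0; i < 4; i++)
+        pthread_create(&threads[i], NULL, worker, &fd);
+    for (int i = 0; i < 4; i++)
+        pthread_join(threads[i], NULL);
+    return 0;
+}
+```
+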
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c
new file mode 100644
index 0000000..93e36de
--- /dev/null
+++ b/web/server/static/static-threaded.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "static-threaded.h"
+
+int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS;
+int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST;
+long web_client_streaming_rate_t = 0L;
+
+/*
+ * --------------------------------------------------------------------------------------------------------------------
+ * Build web_client state from the pollinfo that describes an accepted connection.
+ */
+static struct web_client *web_client_create_on_fd(POLLINFO *pi) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache_or_allocate();
+ w->ifd = w->ofd = pi->fd;
+
+ strncpyz(w->client_ip, pi->client_ip, sizeof(w->client_ip) - 1);
+ strncpyz(w->client_port, pi->client_port, sizeof(w->client_port) - 1);
+ strncpyz(w->client_host, pi->client_host, sizeof(w->client_host) - 1);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+ w->port_acl = pi->port_acl;
+
+ web_client_initialize_connection(w);
+ w->pollinfo_slot = pi->slot;
+ return(w);
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - STATIC-THREADED
+
+struct web_server_static_threaded_worker {
+ netdata_thread_t thread;
+
+ int id;
+ int running;
+
+ size_t max_sockets;
+
+ volatile size_t connected;
+ volatile size_t disconnected;
+ volatile size_t receptions;
+ volatile size_t sends;
+ volatile size_t max_concurrent;
+
+ volatile size_t files_read;
+ volatile size_t file_reads;
+};
+
+static long long static_threaded_workers_count = 1;
+
+static struct web_server_static_threaded_worker *static_workers_private_data = NULL;
+static __thread struct web_server_static_threaded_worker *worker_private = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline int web_server_check_client_status(struct web_client *w) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
+ return -1;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// web server files
+
+static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) {
+ struct web_client *w = (struct web_client *)data;
+
+ worker_private->files_read++;
+
+ debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd);
+ *events = POLLIN;
+ pi->data = w;
+ return w;
+}
+
+static void web_server_file_del_callback(POLLINFO *pi) {
+ struct web_client *w = (struct web_client *)pi->data;
+ debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd);
+
+ w->pollinfo_filecopy_slot = 0;
+
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_file_read_callback(POLLINFO *pi, short int *events) {
+ struct web_client *w = (struct web_client *)pi->data;
+
+ // if there is no POLLINFO linked to this, it means the client disconnected
+ // stop the file reading too
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd);
+
+ worker_private->file_reads++;
+    ssize_t ret = web_client_read_file(w);
+
+ if(likely(web_client_has_wait_send(w))) {
+ POLLJOB *p = pi->p; // our POLLJOB
+ POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket
+
+ debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd);
+ p->fds[wpi->slot].events |= POLLOUT;
+ }
+
+ if(unlikely(ret <= 0 || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd);
+ return -1;
+ }
+
+ *events = POLLIN;
+ return 0;
+}
+
+static int web_server_file_write_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ error("Writing to web files is not supported!");
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// web server clients
+
+static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) {
+    (void)data; // Suppress warning on unused argument
+
+ worker_private->connected++;
+
+ size_t concurrent = worker_private->connected - worker_private->disconnected;
+ if(unlikely(concurrent > worker_private->max_concurrent))
+ worker_private->max_concurrent = concurrent;
+
+ *events = POLLIN;
+
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd);
+ struct web_client *w = web_client_create_on_fd(pi);
+
+ if (!strncmp(pi->client_port, "UNIX", 4)) {
+ web_client_set_unix(w);
+ } else {
+ web_client_set_tcp(w);
+ }
+
+#ifdef ENABLE_HTTPS
+ if ((!web_client_check_unix(w)) && ( netdata_srv_ctx )) {
+ if( sock_delnonblock(w->ifd) < 0 ){
+ error("Web server cannot remove the non-blocking flag from socket %d",w->ifd);
+ }
+
+ //Read the first 7 bytes from the message, but the message
+ //is not removed from the queue, because we are using MSG_PEEK
+ char test[8];
+ if ( recv(w->ifd,test, 7,MSG_PEEK) == 7 ) {
+ test[7] = 0x00;
+ }
+ else {
+            // We did not manage to read 7 bytes, which means the message has not
+            // been completely received yet, so it cannot be identified at this point.
+ sock_setnonblock(w->ifd);
+ return w;
+ }
+
+        // The next two if blocks are kept separate because the SSL structure is reused
+ if (!w->ssl.conn)
+ {
+ w->ssl.conn = SSL_new(netdata_srv_ctx);
+ if ( w->ssl.conn ) {
+ SSL_set_accept_state(w->ssl.conn);
+ } else {
+ error("Failed to create SSL context on socket fd %d.", w->ifd);
+ if (test[0] < 0x18){
+ WEB_CLIENT_IS_DEAD(w);
+ sock_setnonblock(w->ifd);
+ return w;
+ }
+ }
+ }
+
+ if (w->ssl.conn) {
+ if (SSL_set_fd(w->ssl.conn, w->ifd) != 1) {
+ error("Failed to set the socket to the SSL on socket fd %d.", w->ifd);
+                // The client is not marked dead, because a normal HTTP request was
+                // received instead of a TLS Client Hello (HTTPS).
+ if ( test[0] < 0x18 ){
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ }
+ else{
+ w->ssl.flags = security_process_accept(w->ssl.conn, (int)test[0]);
+ }
+ }
+
+ sock_setnonblock(w->ifd);
+ } else{
+ w->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+#endif
+
+ debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd);
+ return w;
+}
+
+// TCP client disconnected
+static void web_server_del_callback(POLLINFO *pi) {
+ worker_private->disconnected++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+
+ w->pollinfo_slot = 0;
+ if(unlikely(w->pollinfo_filecopy_slot)) {
+        POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the filecopy file descriptor
+ (void)fpi;
+
+ debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FRED BY READING FILE JOB ON FD %d", w->id, fpi->fd);
+ }
+ else {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET))
+ pi->flags |= POLLINFO_FLAG_DONT_CLOSE;
+
+ debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_rcv_callback(POLLINFO *pi, short int *events) {
+ worker_private->receptions++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ if(unlikely(web_client_receive(w) < 0))
+ return -1;
+
+ debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd);
+ web_client_process_request(w);
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->pollinfo_filecopy_slot == 0) {
+ debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd);
+
+ if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) {
+                // add the file descriptor being copied to poll_events, with the same port ACL as the client
+ debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd);
+
+ POLLINFO *fpi = poll_add_fd(
+ pi->p
+ , w->ifd
+ , pi->port_acl
+ , 0
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , "FILENAME"
+ , ""
+ , ""
+ , web_server_file_add_callback
+ , web_server_file_del_callback
+ , web_server_file_read_callback
+ , web_server_file_write_callback
+ , (void *) w
+ );
+
+ if(fpi)
+ w->pollinfo_filecopy_slot = fpi->slot;
+ else {
+ error("Failed to add filecopy fd. Closing client.");
+ return -1;
+ }
+ }
+ }
+ }
+ else {
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+ }
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static int web_server_snd_callback(POLLINFO *pi, short int *events) {
+ worker_private->sends++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd);
+
+ if(unlikely(web_client_send(w) < 0))
+ return -1;
+
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static void web_server_tmr_callback(void *timer_data) {
+ worker_private = (struct web_server_static_threaded_worker *)timer_data;
+
+ static __thread RRDSET *st = NULL;
+ static __thread RRDDIM *rd_user = NULL, *rd_system = NULL;
+
+ if(unlikely(netdata_exit)) return;
+
+ if(unlikely(!st)) {
+ char id[100 + 1];
+ char title[100 + 1];
+
+ snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1);
+ snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1);
+
+ st = rrdset_create_localhost(
+ "netdata"
+ , id
+ , NULL
+ , "web"
+ , "netdata.web_cpu"
+ , title
+ , "milliseconds/s"
+ , "web"
+ , "stats"
+ , 132000 + worker_private->id
+ , default_rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st);
+
+ struct rusage rusage;
+ getrusage(RUSAGE_THREAD, &rusage);
+ rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec);
+ rrdset_done(st);
+}
+
+// ----------------------------------------------------------------------------
+// web server worker thread
+
+static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+
+ info("freeing local web clients cache...");
+ web_client_cache_destroy();
+
+ info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends",
+ worker_private->connected,
+ worker_private->disconnected,
+ worker_private->max_concurrent,
+ worker_private->receptions,
+ worker_private->sends
+ );
+
+ worker_private->running = 0;
+}
+
+void *socket_listen_main_static_threaded_worker(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+ worker_private->running = 1;
+
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr);
+
+ poll_events(&api_sockets
+ , web_server_add_callback
+ , web_server_del_callback
+ , web_server_rcv_callback
+ , web_server_snd_callback
+ , web_server_tmr_callback
+ , web_allow_connections_from
+ , web_allow_connections_dns
+ , NULL
+ , web_client_first_request_timeout
+ , web_client_timeout
+ , default_rrd_update_every * 1000 // timer_milliseconds
+ , ptr // timer_data
+ , worker_private->max_sockets
+ );
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// web server main thread - also becomes a worker
+
+static void socket_listen_main_static_threaded_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ int i, found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+    // we start from 1 - slot 0 is this thread itself
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if(static_workers_private_data[i].running) {
+ found++;
+ info("stopping worker %d", i + 1);
+ netdata_thread_cancel(static_workers_private_data[i].thread);
+ }
+ else
+ info("found stopped worker %d", i + 1);
+ }
+
+ while(found && max > 0) {
+ max -= step;
+ info("Waiting %d static web threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+
+        // we start from 1 - slot 0 is this thread itself
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if (static_workers_private_data[i].running)
+ found++;
+ }
+ }
+
+ if(found)
+ error("%d static web threads are taking too long to finish. Giving up.", found);
+
+ info("closing all web server sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("all static web threads stopped.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_static_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr);
+ web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+#ifdef ENABLE_HTTPS
+ security_start_ssl(NETDATA_SSL_CONTEXT_SERVER);
+#endif
+    // 6 threads is the optimal value, since browsers open up to 6 parallel connections;
+    // so, even if the machine has more CPUs, avoid using resources unnecessarily
+ int def_thread_count = (processors > 6)?6:processors;
+
+ if (!strcmp(config_get(CONFIG_SECTION_WEB, "mode", ""),"single-threaded")) {
+ info("Running web server with one thread, because mode is single-threaded");
+ config_set(CONFIG_SECTION_WEB, "mode", "static-threaded");
+ def_thread_count = 1;
+ }
+ static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
+
+ if(static_threaded_workers_count < 1) static_threaded_workers_count = 1;
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 4));
+
+ static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker));
+
+ web_server_is_multithreaded = (static_threaded_workers_count > 1);
+
+ int i;
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ static_workers_private_data[i].id = i;
+ static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count;
+
+ char tag[50 + 1];
+ snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1);
+
+ info("starting worker %d", i+1);
+ netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]);
+ }
+
+ // and the main one
+ static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count;
+ socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]);
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/web/server/static/static-threaded.h b/web/server/static/static-threaded.h
new file mode 100644
index 0000000..5f4862e
--- /dev/null
+++ b/web/server/static/static-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_STATIC_THREADED_H
+#define NETDATA_WEB_SERVER_STATIC_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_static_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_STATIC_THREADED_H
diff --git a/web/server/web_client.c b/web/server/web_client.c
new file mode 100644
index 0000000..f0856fb
--- /dev/null
+++ b/web/server/web_client.c
@@ -0,0 +1,2035 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_client.h"
+
+// this is an async I/O implementation of the web server request parser
+// it is used by all netdata web servers
+
+int respect_web_browser_do_not_track_policy = 0;
+char *web_x_frame_options = NULL;
+
+#ifdef NETDATA_WITH_ZLIB
+int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY;
+#endif /* NETDATA_WITH_ZLIB */
+
+inline int web_client_permission_denied(struct web_client *w) {
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "You are not allowed to access this resource.");
+ w->response.code = HTTP_RESP_FORBIDDEN;
+ return HTTP_RESP_FORBIDDEN;
+}
+
+static inline int web_client_crock_socket(struct web_client *w) {
+#ifdef TCP_CORK
+ if(likely(web_client_is_corkable(w) && !w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = 1;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ error("%llu: failed to enable TCP_CORK on socket.", w->id);
+
+ w->tcp_cork = 0;
+ return -1;
+ }
+ }
+#else
+ (void)w;
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+static inline int web_client_uncrock_socket(struct web_client *w) {
+#ifdef TCP_CORK
+ if(likely(w->tcp_cork && w->ofd != -1)) {
+ w->tcp_cork = 0;
+ if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
+ error("%llu: failed to disable TCP_CORK on socket.", w->id);
+ w->tcp_cork = 1;
+ return -1;
+ }
+ }
+#else
+ (void)w;
+#endif /* TCP_CORK */
+
+ return 0;
+}
+
+static inline char *strip_control_characters(char *url) {
+ char *s = url;
+ if(!s) return "";
+
+ if(iscntrl(*s)) *s = ' ';
+ while(*++s) {
+ if(iscntrl(*s)) *s = ' ';
+ }
+
+ return url;
+}
+
+void web_client_request_done(struct web_client *w) {
+ web_client_uncrock_socket(w);
+
+ debug(D_WEB_CLIENT, "%llu: Resetting client.", w->id);
+
+ if(likely(w->last_url[0])) {
+ struct timeval tv;
+ now_realtime_timeval(&tv);
+
+ size_t size = (w->mode == WEB_CLIENT_MODE_FILECOPY)?w->response.rlen:w->response.data->len;
+ size_t sent = size;
+#ifdef NETDATA_WITH_ZLIB
+ if(likely(w->response.zoutput)) sent = (size_t)w->response.zstream.total_out;
+#endif
+
+ // --------------------------------------------------------------------
+ // global statistics
+
+ finished_web_request_statistics(dt_usec(&tv, &w->tv_in),
+ w->stats_received_bytes,
+ w->stats_sent_bytes,
+ size,
+ sent);
+
+ w->stats_received_bytes = 0;
+ w->stats_sent_bytes = 0;
+
+
+ // --------------------------------------------------------------------
+
+ const char *mode;
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_FILECOPY:
+ mode = "FILECOPY";
+ break;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ mode = "OPTIONS";
+ break;
+
+ case WEB_CLIENT_MODE_STREAM:
+ mode = "STREAM";
+ break;
+
+ case WEB_CLIENT_MODE_NORMAL:
+ mode = "DATA";
+ break;
+
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+
+ // access log
+ log_access("%llu: %d '[%s]:%s' '%s' (sent/all = %zu/%zu bytes %0.0f%%, prep/sent/total = %0.2f/%0.2f/%0.2f ms) %d '%s'",
+ w->id
+ , gettid()
+ , w->client_ip
+ , w->client_port
+ , mode
+ , sent
+ , size
+ , -((size > 0) ? ((size - sent) / (double) size * 100.0) : 0.0)
+ , dt_usec(&w->tv_ready, &w->tv_in) / 1000.0
+ , dt_usec(&tv, &w->tv_ready) / 1000.0
+ , dt_usec(&tv, &w->tv_in) / 1000.0
+ , w->response.code
+ , strip_control_characters(w->last_url)
+ );
+ }
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->ifd != w->ofd) {
+ debug(D_WEB_CLIENT, "%llu: Closing filecopy input file descriptor %d.", w->id, w->ifd);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1){
+ close(w->ifd);
+ }
+ }
+
+ w->ifd = w->ofd;
+ }
+ }
+
+ w->last_url[0] = '\0';
+ w->cookie1[0] = '\0';
+ w->cookie2[0] = '\0';
+ w->origin[0] = '*';
+ w->origin[1] = '\0';
+
+ freez(w->user_agent); w->user_agent = NULL;
+ if (w->auth_bearer_token) {
+ freez(w->auth_bearer_token);
+ w->auth_bearer_token = NULL;
+ }
+
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ w->tcp_cork = 0;
+ web_client_disable_donottrack(w);
+ web_client_disable_tracking_required(w);
+ web_client_disable_keepalive(w);
+ w->decoded_url[0] = '\0';
+
+ buffer_reset(w->response.header_output);
+ buffer_reset(w->response.header);
+ buffer_reset(w->response.data);
+ w->response.rlen = 0;
+ w->response.sent = 0;
+ w->response.code = 0;
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+
+ w->response.zoutput = 0;
+
+ // if we had enabled compression, release it
+#ifdef NETDATA_WITH_ZLIB
+ if(w->response.zinitialized) {
+ debug(D_DEFLATE, "%llu: Freeing compression resources.", w->id);
+ deflateEnd(&w->response.zstream);
+ w->response.zsent = 0;
+ w->response.zhave = 0;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_in = 0;
+ w->response.zstream.total_out = 0;
+ w->response.zinitialized = 0;
+ w->flags &= ~WEB_CLIENT_CHUNKED_TRANSFER;
+ }
+#endif // NETDATA_WITH_ZLIB
+}
+
+uid_t web_files_uid(void) {
+ static char *web_owner = NULL;
+ static uid_t owner_uid = 0;
+
+ if(unlikely(!web_owner)) {
+ // getpwuid() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ struct passwd *pw = getpwuid(geteuid());
+ web_owner = config_get(CONFIG_SECTION_WEB, "web files owner", (pw)?(pw->pw_name?pw->pw_name:""):"");
+ if(!web_owner || !*web_owner)
+ owner_uid = geteuid();
+ else {
+ // getpwnam() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ pw = getpwnam(web_owner);
+ if(!pw) {
+ error("User '%s' is not present. Ignoring option.", web_owner);
+ owner_uid = geteuid();
+ }
+ else {
+ debug(D_WEB_CLIENT, "Web files owner set to %s.", web_owner);
+ owner_uid = pw->pw_uid;
+ }
+ }
+ }
+
+ return(owner_uid);
+}
+
+gid_t web_files_gid(void) {
+ static char *web_group = NULL;
+ static gid_t owner_gid = 0;
+
+ if(unlikely(!web_group)) {
+ // getgrgid() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ struct group *gr = getgrgid(getegid());
+ web_group = config_get(CONFIG_SECTION_WEB, "web files group", (gr)?(gr->gr_name?gr->gr_name:""):"");
+ if(!web_group || !*web_group)
+ owner_gid = getegid();
+ else {
+ // getgrnam() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ gr = getgrnam(web_group);
+ if(!gr) {
+ error("Group '%s' is not present. Ignoring option.", web_group);
+ owner_gid = getegid();
+ }
+ else {
+ debug(D_WEB_CLIENT, "Web files group set to %s.", web_group);
+ owner_gid = gr->gr_gid;
+ }
+ }
+ }
+
+ return(owner_gid);
+}
+
+static struct {
+ const char *extension;
+ uint32_t hash;
+ uint8_t contenttype;
+} mime_types[] = {
+ { "html" , 0 , CT_TEXT_HTML}
+ , {"js" , 0 , CT_APPLICATION_X_JAVASCRIPT}
+ , {"css" , 0 , CT_TEXT_CSS}
+ , {"xml" , 0 , CT_TEXT_XML}
+ , {"xsl" , 0 , CT_TEXT_XSL}
+ , {"txt" , 0 , CT_TEXT_PLAIN}
+ , {"svg" , 0 , CT_IMAGE_SVG_XML}
+ , {"ttf" , 0 , CT_APPLICATION_X_FONT_TRUETYPE}
+ , {"otf" , 0 , CT_APPLICATION_X_FONT_OPENTYPE}
+ , {"woff2", 0 , CT_APPLICATION_FONT_WOFF2}
+ , {"woff" , 0 , CT_APPLICATION_FONT_WOFF}
+ , {"eot" , 0 , CT_APPLICATION_VND_MS_FONTOBJ}
+ , {"png" , 0 , CT_IMAGE_PNG}
+ , {"jpg" , 0 , CT_IMAGE_JPG}
+ , {"jpeg" , 0 , CT_IMAGE_JPG}
+ , {"gif" , 0 , CT_IMAGE_GIF}
+ , {"bmp" , 0 , CT_IMAGE_BMP}
+ , {"ico" , 0 , CT_IMAGE_XICON}
+ , {"icns" , 0 , CT_IMAGE_ICNS}
+ , { NULL, 0, 0}
+};
+
+static inline uint8_t contenttype_for_filename(const char *filename) {
+ // info("checking filename '%s'", filename);
+
+ static int initialized = 0;
+ int i;
+
+ if(unlikely(!initialized)) {
+ for (i = 0; mime_types[i].extension; i++)
+ mime_types[i].hash = simple_hash(mime_types[i].extension);
+
+ initialized = 1;
+ }
+
+ const char *s = filename, *last_dot = NULL;
+
+ // find the last dot
+ while(*s) {
+ if(unlikely(*s == '.')) last_dot = s;
+ s++;
+ }
+
+ if(unlikely(!last_dot || !*last_dot || !last_dot[1])) {
+ // info("no extension for filename '%s'", filename);
+ return CT_APPLICATION_OCTET_STREAM;
+ }
+ last_dot++;
+
+ // info("extension for filename '%s' is '%s'", filename, last_dot);
+
+ uint32_t hash = simple_hash(last_dot);
+ for(i = 0; mime_types[i].extension ; i++) {
+ if(unlikely(hash == mime_types[i].hash && !strcmp(last_dot, mime_types[i].extension))) {
+ // info("matched extension for filename '%s': '%s'", filename, last_dot);
+ return mime_types[i].contenttype;
+ }
+ }
+
+ // info("not matched extension for filename '%s': '%s'", filename, last_dot);
+ return CT_APPLICATION_OCTET_STREAM;
+}
+
+static inline int access_to_file_is_not_permitted(struct web_client *w, const char *filename) {
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Access to file is not permitted: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_FORBIDDEN;
+}
+
+// Work around a bug in the CMocka library by removing this function during testing.
+#ifndef REMOVE_MYSENDFILE
+int mysendfile(struct web_client *w, char *filename) {
+ debug(D_WEB_CLIENT, "%llu: Looking for file '%s/%s'", w->id, netdata_configured_web_dir, filename);
+
+ if(!web_client_can_access_dashboard(w))
+ return web_client_permission_denied(w);
+
+ // skip leading slashes
+ while (*filename == '/') filename++;
+
+ // if the filename contains "strange" characters, refuse to serve it
+ char *s;
+ for(s = filename; *s ;s++) {
+ if( !isalnum(*s) && *s != '/' && *s != '.' && *s != '-' && *s != '_') {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Filename contains invalid characters: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ }
+
+ // if the filename contains a .. refuse to serve it
+ if(strstr(filename, "..") != 0) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not acceptable.", w->id, filename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Relative filenames are not supported: ");
+ buffer_strcat_htmlescape(w->response.data, filename);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ // find the physical file on disk
+ char webfilename[FILENAME_MAX + 1];
+ snprintfz(webfilename, FILENAME_MAX, "%s/%s", netdata_configured_web_dir, filename);
+
+ struct stat statbuf;
+ int done = 0;
+ while(!done) {
+ // check if the file exists
+ if (lstat(webfilename, &statbuf) != 0) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: File '%s' is not found.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "File does not exist, or is not accessible: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ if ((statbuf.st_mode & S_IFMT) == S_IFDIR) {
+ snprintfz(webfilename, FILENAME_MAX, "%s/%s/index.html", netdata_configured_web_dir, filename);
+ continue;
+ }
+
+ if ((statbuf.st_mode & S_IFMT) != S_IFREG) {
+ error("%llu: File '%s' is not a regular file. Access Denied.", w->id, webfilename);
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ // check if the file is owned by expected user
+ if (statbuf.st_uid != web_files_uid()) {
+ error("%llu: File '%s' is owned by user %u (expected user %u). Access Denied.", w->id, webfilename, statbuf.st_uid, web_files_uid());
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ // check if the file is owned by expected group
+ if (statbuf.st_gid != web_files_gid()) {
+ error("%llu: File '%s' is owned by group %u (expected group %u). Access Denied.", w->id, webfilename, statbuf.st_gid, web_files_gid());
+ return access_to_file_is_not_permitted(w, webfilename);
+ }
+
+ done = 1;
+ }
+
+ // open the file
+ w->ifd = open(webfilename, O_NONBLOCK, O_RDONLY);
+ if(w->ifd == -1) {
+ w->ifd = w->ofd;
+
+ if(errno == EBUSY || errno == EAGAIN) {
+ error("%llu: File '%s' is busy, sending 307 Moved Temporarily to force retry.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.header, "Location: /%s\r\n", filename);
+ buffer_strcat(w->response.data, "File is currently busy, please try again later: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return HTTP_RESP_REDIR_TEMP;
+ }
+ else {
+ error("%llu: Cannot open file '%s'.", w->id, webfilename);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Cannot open file: ");
+ buffer_strcat_htmlescape(w->response.data, webfilename);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ }
+
+ sock_setnonblock(w->ifd);
+
+ w->response.data->contenttype = contenttype_for_filename(webfilename);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Sending file '%s' (%ld bytes, ifd %d, ofd %d).", w->id, webfilename, statbuf.st_size, w->ifd, w->ofd);
+
+ w->mode = WEB_CLIENT_MODE_FILECOPY;
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
+ buffer_flush(w->response.data);
+ buffer_need_bytes(w->response.data, (size_t)statbuf.st_size);
+ w->response.rlen = (size_t)statbuf.st_size;
+#ifdef __APPLE__
+ w->response.data->date = statbuf.st_mtimespec.tv_sec;
+#else
+ w->response.data->date = statbuf.st_mtim.tv_sec;
+#endif
+ buffer_cacheable(w->response.data);
+
+ return HTTP_RESP_OK;
+}
+#endif
+
+
+
+#ifdef NETDATA_WITH_ZLIB
+void web_client_enable_deflate(struct web_client *w, int gzip) {
+ if(unlikely(w->response.zinitialized)) {
+ debug(D_DEFLATE, "%llu: Compression has already be initialized for this client.", w->id);
+ return;
+ }
+
+ if(unlikely(w->response.sent)) {
+ error("%llu: Cannot enable compression in the middle of a conversation.", w->id);
+ return;
+ }
+
+ w->response.zstream.zalloc = Z_NULL;
+ w->response.zstream.zfree = Z_NULL;
+ w->response.zstream.opaque = Z_NULL;
+
+ w->response.zstream.next_in = (Bytef *)w->response.data->buffer;
+ w->response.zstream.avail_in = 0;
+ w->response.zstream.total_in = 0;
+
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = 0;
+ w->response.zstream.total_out = 0;
+
+ w->response.zstream.zalloc = Z_NULL;
+ w->response.zstream.zfree = Z_NULL;
+ w->response.zstream.opaque = Z_NULL;
+
+// if(deflateInit(&w->response.zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+// error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+// return;
+// }
+
+ // Select GZIP compression: windowbits = 15 + 16 = 31
+ if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + ((gzip)?16:0), 8, web_gzip_strategy) != Z_OK) {
+ error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+ return;
+ }
+
+ w->response.zsent = 0;
+ w->response.zoutput = 1;
+ w->response.zinitialized = 1;
+ w->flags |= WEB_CLIENT_CHUNKED_TRANSFER;
+
+ debug(D_DEFLATE, "%llu: Initialized compression.", w->id);
+}
+#endif // NETDATA_WITH_ZLIB
+
+void buffer_data_options2string(BUFFER *wb, uint32_t options) {
+ int count = 0;
+
+ if(options & RRDR_OPTION_NONZERO) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "nonzero");
+ }
+
+ if(options & RRDR_OPTION_REVERSED) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "flip");
+ }
+
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "jsonwrap");
+ }
+
+ if(options & RRDR_OPTION_MIN2MAX) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "min2max");
+ }
+
+ if(options & RRDR_OPTION_MILLISECONDS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "ms");
+ }
+
+ if(options & RRDR_OPTION_ABSOLUTE) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "absolute");
+ }
+
+ if(options & RRDR_OPTION_SECONDS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "seconds");
+ }
+
+ if(options & RRDR_OPTION_NULL2ZERO) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "null2zero");
+ }
+
+ if(options & RRDR_OPTION_OBJECTSROWS) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "objectrows");
+ }
+
+ if(options & RRDR_OPTION_GOOGLE_JSON) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "google_json");
+ }
+
+ if(options & RRDR_OPTION_PERCENTAGE) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "percentage");
+ }
+
+ if(options & RRDR_OPTION_NOT_ALIGNED) {
+ if(count++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, "unaligned");
+ }
+}
+
+static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ //if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) {
+ // buffer_flush(w->response.data);
+ // buffer_strcat(w->response.data, "This host does not maintain a database");
+ // return HTTP_RESP_BAD_REQUEST;
+ //}
+
+ return func(host, w, url);
+}
+
+static inline int check_host_and_dashboard_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ if(!web_client_can_access_dashboard(w))
+ return web_client_permission_denied(w);
+
+ return check_host_and_call(host, w, url, func);
+}
+
+static inline int check_host_and_mgmt_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
+ if(!web_client_can_access_mgmt(w))
+ return web_client_permission_denied(w);
+
+ return check_host_and_call(host, w, url, func);
+}
+
+int web_client_api_request(RRDHOST *host, struct web_client *w, char *url)
+{
+ // get the api version
+ char *tok = mystrsep(&url, "/");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for API version '%s'.", w->id, tok);
+ if(strcmp(tok, "v1") == 0)
+ return web_client_api_request_v1(host, w, url);
+ else {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Unsupported API version: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ return HTTP_RESP_NOT_FOUND;
+ }
+ }
+ else {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Which API version?");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+}
+
+const char *web_content_type_to_string(uint8_t contenttype) {
+ switch(contenttype) {
+ case CT_TEXT_HTML:
+ return "text/html; charset=utf-8";
+
+ case CT_APPLICATION_XML:
+ return "application/xml; charset=utf-8";
+
+ case CT_APPLICATION_JSON:
+ return "application/json; charset=utf-8";
+
+ case CT_APPLICATION_X_JAVASCRIPT:
+ return "application/x-javascript; charset=utf-8";
+
+ case CT_TEXT_CSS:
+ return "text/css; charset=utf-8";
+
+ case CT_TEXT_XML:
+ return "text/xml; charset=utf-8";
+
+ case CT_TEXT_XSL:
+ return "text/xsl; charset=utf-8";
+
+ case CT_APPLICATION_OCTET_STREAM:
+ return "application/octet-stream";
+
+ case CT_IMAGE_SVG_XML:
+ return "image/svg+xml";
+
+ case CT_APPLICATION_X_FONT_TRUETYPE:
+ return "application/x-font-truetype";
+
+ case CT_APPLICATION_X_FONT_OPENTYPE:
+ return "application/x-font-opentype";
+
+ case CT_APPLICATION_FONT_WOFF:
+ return "application/font-woff";
+
+ case CT_APPLICATION_FONT_WOFF2:
+ return "application/font-woff2";
+
+ case CT_APPLICATION_VND_MS_FONTOBJ:
+ return "application/vnd.ms-fontobject";
+
+ case CT_IMAGE_PNG:
+ return "image/png";
+
+ case CT_IMAGE_JPG:
+ return "image/jpeg";
+
+ case CT_IMAGE_GIF:
+ return "image/gif";
+
+ case CT_IMAGE_XICON:
+ return "image/x-icon";
+
+ case CT_IMAGE_BMP:
+ return "image/bmp";
+
+ case CT_IMAGE_ICNS:
+ return "image/icns";
+
+ case CT_PROMETHEUS:
+ return "text/plain; version=0.0.4";
+
+ default:
+ case CT_TEXT_PLAIN:
+ return "text/plain; charset=utf-8";
+ }
+}
+
+
+const char *web_response_code_to_string(int code) {
+ switch(code) {
+ case HTTP_RESP_OK:
+ return "OK";
+
+ case HTTP_RESP_MOVED_PERM:
+ return "Moved Permanently";
+
+ case HTTP_RESP_REDIR_TEMP:
+ return "Temporary Redirect";
+
+ case HTTP_RESP_BAD_REQUEST:
+ return "Bad Request";
+
+ case HTTP_RESP_FORBIDDEN:
+ return "Forbidden";
+
+ case HTTP_RESP_NOT_FOUND:
+ return "Not Found";
+
+ case HTTP_RESP_PRECOND_FAIL:
+ return "Preconditions Failed";
+
+ default:
+ if(code >= 100 && code < 200)
+ return "Informational";
+
+ if(code >= 200 && code < 300)
+ return "Successful";
+
+ if(code >= 300 && code < 400)
+ return "Redirection";
+
+ if(code >= 400 && code < 500)
+ return "Bad Request";
+
+ if(code >= 500 && code < 600)
+ return "Server Error";
+
+ return "Undefined Error";
+ }
+}
+
+static inline char *http_header_parse(struct web_client *w, char *s, int parse_useragent) {
+ static uint32_t hash_origin = 0, hash_connection = 0, hash_donottrack = 0, hash_useragent = 0,
+ hash_authorization = 0, hash_host = 0, hash_forwarded_proto = 0, hash_forwarded_host = 0;
+#ifdef NETDATA_WITH_ZLIB
+ static uint32_t hash_accept_encoding = 0;
+#endif
+
+ if(unlikely(!hash_origin)) {
+ hash_origin = simple_uhash("Origin");
+ hash_connection = simple_uhash("Connection");
+#ifdef NETDATA_WITH_ZLIB
+ hash_accept_encoding = simple_uhash("Accept-Encoding");
+#endif
+ hash_donottrack = simple_uhash("DNT");
+ hash_useragent = simple_uhash("User-Agent");
+ hash_authorization = simple_uhash("X-Auth-Token");
+ hash_host = simple_uhash("Host");
+ hash_forwarded_proto = simple_uhash("X-Forwarded-Proto");
+ hash_forwarded_host = simple_uhash("X-Forwarded-Host");
+ }
+
+ char *e = s;
+
+ // find the :
+ while(*e && *e != ':') e++;
+ if(!*e) return e;
+
+ // get the name
+ *e = '\0';
+
+ // find the value
+ char *v = e + 1, *ve;
+
+ // skip leading spaces from value
+ while(*v == ' ') v++;
+ ve = v;
+
+ // find the \r
+ while(*ve && *ve != '\r') ve++;
+ if(!*ve || ve[1] != '\n') {
+ *e = ':';
+ return ve;
+ }
+
+ // terminate the value
+ *ve = '\0';
+
+ uint32_t hash = simple_uhash(s);
+
+ if(hash == hash_origin && !strcasecmp(s, "Origin"))
+ strncpyz(w->origin, v, NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE);
+
+ else if(hash == hash_connection && !strcasecmp(s, "Connection")) {
+ if(strcasestr(v, "keep-alive"))
+ web_client_enable_keepalive(w);
+ }
+ else if(respect_web_browser_do_not_track_policy && hash == hash_donottrack && !strcasecmp(s, "DNT")) {
+ if(*v == '0') web_client_disable_donottrack(w);
+ else if(*v == '1') web_client_enable_donottrack(w);
+ }
+ else if(parse_useragent && hash == hash_useragent && !strcasecmp(s, "User-Agent")) {
+ w->user_agent = strdupz(v);
+ } else if(hash == hash_authorization&& !strcasecmp(s, "X-Auth-Token")) {
+ w->auth_bearer_token = strdupz(v);
+ }
+ else if(hash == hash_host && !strcasecmp(s, "Host")){
+ strncpyz(w->server_host, v, ((size_t)(ve - v) < sizeof(w->server_host)-1 ? (size_t)(ve - v) : sizeof(w->server_host)-1));
+ }
+#ifdef NETDATA_WITH_ZLIB
+ else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) {
+ if(web_enable_gzip) {
+ if(strcasestr(v, "gzip"))
+ web_client_enable_deflate(w, 1);
+ //
+ // does not seem to work
+ // else if(strcasestr(v, "deflate"))
+ // web_client_enable_deflate(w, 0);
+ }
+ }
+#endif /* NETDATA_WITH_ZLIB */
+#ifdef ENABLE_HTTPS
+ else if(hash == hash_forwarded_proto && !strcasecmp(s, "X-Forwarded-Proto")) {
+ if(strcasestr(v, "https"))
+ w->ssl.flags |= NETDATA_SSL_PROXY_HTTPS;
+ }
+#endif
+ else if(hash == hash_forwarded_host && !strcasecmp(s, "X-Forwarded-Host")){
+ strncpyz(w->forwarded_host, v, ((size_t)(ve - v) < sizeof(w->forwarded_host)-1 ? (size_t)(ve - v) : sizeof(w->forwarded_host)-1));
+ }
+
+ *e = ':';
+ *ve = '\r';
+ return ve;
+}
+
+/**
+ * Valid Method
+ *
+ * Netdata accepts only three methods; one of them (STREAM) is an internal method.
+ *
+ * @param w is the structure with the client request
+ * @param s is the start string to parse
+ *
+ * @return the next address to parse when the method is valid, or NULL otherwise.
+ */
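+// Illustrative request lines handled here (the URLs are examples only):
+//   "GET /api/v1/info HTTP/1.1"           -> WEB_CLIENT_MODE_NORMAL
+//   "OPTIONS /api/v1/info HTTP/1.1"       -> WEB_CLIENT_MODE_OPTIONS
+//   "STREAM hostname=child&... HTTP/1.1"  -> WEB_CLIENT_MODE_STREAM (internal streaming protocol)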
+static inline char *web_client_valid_method(struct web_client *w, char *s) {
+ // is this a valid request?
+ if(!strncmp(s, "GET ", 4)) {
+ s = &s[4];
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+ }
+ else if(!strncmp(s, "OPTIONS ", 8)) {
+ s = &s[8];
+ w->mode = WEB_CLIENT_MODE_OPTIONS;
+ }
+ else if(!strncmp(s, "STREAM ", 7)) {
+ s = &s[7];
+
+#ifdef ENABLE_HTTPS
+ if (w->ssl.flags && web_client_is_using_ssl_force(w)){
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+
+ char hostname[256];
+ char *copyme = strstr(s,"hostname=");
+ if ( copyme ){
+ copyme += 9;
+ char *end = strchr(copyme,'&');
+ if(end){
+ size_t length = MIN(255, end - copyme);
+ memcpy(hostname,copyme,length);
+ hostname[length] = 0x00;
+ }
+ else{
+ memcpy(hostname,"not available",13);
+ hostname[13] = 0x00;
+ }
+ }
+ else{
+ memcpy(hostname,"not available",13);
+ hostname[13] = 0x00;
+ }
+ error("The server is configured to always use encrypted connections. Please enable SSL on the child with hostname '%s'.", hostname);
+ s = NULL;
+ }
+#endif
+
+ w->mode = WEB_CLIENT_MODE_STREAM;
+ }
+ else {
+ s = NULL;
+ }
+
+ return s;
+}
+
+/**
+ * Set Path Query
+ *
+ * Set the pointers to the path and query string according to the input.
+ *
+ * @param w is the structure with the client request
+ * @param s is the first address of the string.
+ * @param ptr is the address of the separator.
+ */
+static void web_client_set_path_query(struct web_client *w, char *s, char *ptr) {
+ w->url_path_length = (size_t)(ptr - s);
+
+ w->url_search_path = ptr;
+}
+
+/**
+ * Split path query
+ *
+ * Separate the path from the query string.
+ *
+ * @param w is the structure with the client request
+ * @param s is the string to parse
+ */
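+// Illustrative example: for "/api/v1/data?chart=system.cpu" the path length becomes 12,
+// the separator is stored as '?' and url_search_path points at "?chart=system.cpu".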
+void web_client_split_path_query(struct web_client *w, char *s) {
+ // I am assuming here that the separator character ('?') is not encoded
+ char *ptr = strchr(s, '?');
+ if(ptr) {
+ w->separator = '?';
+ web_client_set_path_query(w, s, ptr);
+ return;
+ }
+
+ // Here I test the second possibility: the URL is completely encoded by the client.
+ // I am not using strcasestr(), because it is faster to check for '%' and compare
+ // the next two characters against "3f"/"3F".
+ // Tests with "encodeURI(uri);" (see https://www.w3schools.com/jsref/jsref_encodeuri.asp),
+ // run on July 1st, 2019, showed that URLs won't have '?', '=' and '&' encoded, but we
+ // keep this check because clients may use their own encoders that do not follow this rule.
+ char *moveme = s;
+ while (moveme) {
+ ptr = strchr(moveme, '%');
+ if(ptr) {
+ char *test = (ptr+1);
+ if (!strncmp(test, "3f", 2) || !strncmp(test, "3F", 2)) {
+ w->separator = *ptr;
+ web_client_set_path_query(w, s, ptr);
+ return;
+ }
+ ptr++;
+ }
+
+ moveme = ptr;
+ }
+
+ w->separator = 0x00;
+ w->url_path_length = strlen(s);
+}
+
+/**
+ * Request validate
+ *
+ * @param w is the structure with the client request
+ *
+ * @return It returns HTTP_VALIDATION_OK on success and another code present
+ * in the enum HTTP_VALIDATION otherwise.
+ */
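+// The request is parsed in place inside w->response.data: the method is validated first,
+// then the URL is split and decoded, and the remaining header lines are parsed one by one
+// until the terminating \r\n\r\n is found.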
+static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
+ char *s = (char *)buffer_tostring(w->response.data), *encoded_url = NULL;
+
+ size_t last_pos = w->header_parse_last_size;
+
+ w->header_parse_tries++;
+ w->header_parse_last_size = buffer_strlen(w->response.data);
+
+ int is_it_valid;
+ if(w->header_parse_tries > 1) {
+ if(last_pos > 4) last_pos -= 4; // allow searching for \r\n\r\n
+ else last_pos = 0;
+
+ if(w->header_parse_last_size < last_pos)
+ last_pos = 0;
+
+ is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size);
+ if(!is_it_valid) {
+ if(w->header_parse_tries > 10) {
+ info("Disabling slow client after %zu attempts to read the request (%zu bytes received)", w->header_parse_tries, buffer_strlen(w->response.data));
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ }
+
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ is_it_valid = 1;
+ } else {
+ last_pos = w->header_parse_last_size;
+ is_it_valid = url_is_request_complete(s, &s[last_pos], w->header_parse_last_size);
+ }
+
+ s = web_client_valid_method(w, s);
+ if (!s) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ } else if (!is_it_valid) {
+ // Invalid request: we have more data after the end of the message
+ char *check = strstr((char *)buffer_tostring(w->response.data), "\r\n\r\n");
+ if(check) {
+ check += 4;
+ if (*check) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_NOT_SUPPORTED;
+ }
+ }
+
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ //After the method we have the path and query string together
+ encoded_url = s;
+
+ // we search for " HTTP/", which marks the end of the request line
+ s = url_find_protocol(s);
+
+ // incomplete requests
+ if(unlikely(!*s)) {
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+ }
+
+ // we have the end of encoded_url - remember it
+ char *ue = s;
+
+ // variables used to map the query string parameters, in case a query string is present
+ int total_variables;
+ char *ptr_variables[WEB_FIELDS_MAX];
+
+ // make sure we have complete request
+ // complete requests contain: \r\n\r\n
+ while(*s) {
+ // find a line feed
+ while(*s && *s++ != '\r');
+
+ // did we reach the end?
+ if(unlikely(!*s)) break;
+
+ // is it \r\n ?
+ if(likely(*s++ == '\n')) {
+
+ // is it again \r\n ? (header end)
+ if(unlikely(*s == '\r' && s[1] == '\n')) {
+ // a valid complete HTTP request found
+
+ *ue = '\0';
+ // reset the search path pointer, to avoid reusing a stale value from a previous request
+ w->url_search_path = NULL;
+ if(w->mode != WEB_CLIENT_MODE_NORMAL) {
+ if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
+ return HTTP_VALIDATION_MALFORMED_URL;
+ } else {
+ web_client_split_path_query(w, encoded_url);
+
+ if (w->url_search_path && w->separator) {
+ *w->url_search_path = 0x00;
+ }
+
+ if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
+ return HTTP_VALIDATION_MALFORMED_URL;
+
+ if (w->url_search_path && w->separator) {
+ *w->url_search_path = w->separator;
+
+ char *from = (encoded_url + w->url_path_length);
+ total_variables = url_map_query_string(ptr_variables, from);
+
+ if (url_parse_query_string(w->decoded_query_string, NETDATA_WEB_REQUEST_URL_SIZE + 1, ptr_variables, total_variables)) {
+ return HTTP_VALIDATION_MALFORMED_URL;
+ }
+ }
+ }
+ *ue = ' ';
+
+ // copy the URL - we are going to overwrite parts of it
+ // TODO -- ideally we should avoid copying buffers around
+ strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) {
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ // The client will be redirected to HTTPS, so we preserve the original (still encoded) request.
+ *ue = '\0';
+ strncpyz(w->last_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
+ *ue = ' ';
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_REDIRECT;
+ }
+ }
+#endif
+
+ w->header_parse_tries = 0;
+ w->header_parse_last_size = 0;
+ web_client_disable_wait_receive(w);
+ return HTTP_VALIDATION_OK;
+ }
+
+ // another header line
+ s = http_header_parse(w, s,
+ (w->mode == WEB_CLIENT_MODE_STREAM) // parse user agent
+ );
+ }
+ }
+
+ // incomplete request
+ web_client_enable_wait_receive(w);
+ return HTTP_VALIDATION_INCOMPLETE;
+}
+
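+// low level send: use SSL_write() when the connection is over TLS, plain send() otherwise;
+// unix domain socket clients always use plain send()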
+static inline ssize_t web_client_send_data(struct web_client *w,const void *buf,size_t len, int flags)
+{
+ ssize_t bytes;
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ( ( w->ssl.conn ) && ( !w->ssl.flags ) ){
+ bytes = SSL_write(w->ssl.conn,buf, len) ;
+ } else {
+ bytes = send(w->ofd,buf, len , flags);
+ }
+ } else {
+ bytes = send(w->ofd,buf, len , flags);
+ }
+#else
+ bytes = send(w->ofd, buf, len, flags);
+#endif
+
+ return bytes;
+}
+
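+// Builds the full response header into w->response.header_output.
+// An illustrative (not authoritative) example of what is produced for a normal request:
+//   HTTP/1.1 200 OK
+//   Connection: keep-alive
+//   Server: NetData Embedded HTTP Server <VERSION>
+//   Access-Control-Allow-Origin: <origin>
+//   Access-Control-Allow-Credentials: true
+//   Content-Type: application/json
+//   Date: <RFC 1123 date>
+//   Cache-Control: public
+//   Expires: <RFC 1123 date>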
+void web_client_build_http_header(struct web_client *w) {
+ if(unlikely(w->response.code != HTTP_RESP_OK))
+ buffer_no_cacheable(w->response.data);
+
+ // set a proper expiration date, if not already set
+ if(unlikely(!w->response.data->expires)) {
+ if(w->response.data->options & WB_CONTENT_NO_CACHEABLE)
+ w->response.data->expires = w->tv_ready.tv_sec + localhost->rrd_update_every;
+ else
+ w->response.data->expires = w->tv_ready.tv_sec + 86400;
+ }
+
+ // prepare the HTTP response header
+ debug(D_WEB_CLIENT, "%llu: Generating HTTP header with response %d.", w->id, w->response.code);
+
+ const char *content_type_string = web_content_type_to_string(w->response.data->contenttype);
+ const char *code_msg = web_response_code_to_string(w->response.code);
+
+ // prepare the last modified and expiration dates
+ char date[32], edate[32];
+ {
+ struct tm tmbuf, *tm;
+
+ tm = gmtime_r(&w->response.data->date, &tmbuf);
+ strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %Z", tm);
+
+ tm = gmtime_r(&w->response.data->expires, &tmbuf);
+ strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", tm);
+ }
+
+ if (w->response.code == HTTP_RESP_MOVED_PERM) {
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Location: https://%s%s\r\n",
+ w->response.code, code_msg,
+ w->server_host,
+ w->last_url);
+ } else {
+ buffer_sprintf(w->response.header_output,
+ "HTTP/1.1 %d %s\r\n"
+ "Connection: %s\r\n"
+ "Server: NetData Embedded HTTP Server %s\r\n"
+ "Access-Control-Allow-Origin: %s\r\n"
+ "Access-Control-Allow-Credentials: true\r\n"
+ "Content-Type: %s\r\n"
+ "Date: %s\r\n",
+ w->response.code,
+ code_msg,
+ web_client_has_keepalive(w)?"keep-alive":"close",
+ VERSION,
+ w->origin,
+ content_type_string,
+ date);
+ }
+
+ if(unlikely(web_x_frame_options))
+ buffer_sprintf(w->response.header_output, "X-Frame-Options: %s\r\n", web_x_frame_options);
+
+ if(w->cookie1[0] || w->cookie2[0]) {
+ if(w->cookie1[0]) {
+ buffer_sprintf(w->response.header_output,
+ "Set-Cookie: %s\r\n",
+ w->cookie1);
+ }
+
+ if(w->cookie2[0]) {
+ buffer_sprintf(w->response.header_output,
+ "Set-Cookie: %s\r\n",
+ w->cookie2);
+ }
+
+ if(respect_web_browser_do_not_track_policy)
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ }
+ else {
+ if(respect_web_browser_do_not_track_policy) {
+ if(web_client_has_tracking_required(w))
+ buffer_sprintf(w->response.header_output,
+ "Tk: T;cookies\r\n");
+ else
+ buffer_sprintf(w->response.header_output,
+ "Tk: N\r\n");
+ }
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_OPTIONS) {
+ buffer_strcat(w->response.header_output,
+ "Access-Control-Allow-Methods: GET, OPTIONS\r\n"
+ "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control, x-auth-token\r\n"
+ "Access-Control-Max-Age: 1209600\r\n" // 86400 * 14
+ );
+ }
+ else {
+ buffer_sprintf(w->response.header_output,
+ "Cache-Control: %s\r\n"
+ "Expires: %s\r\n",
+ (w->response.data->options & WB_CONTENT_NO_CACHEABLE)?"no-cache, no-store, must-revalidate\r\nPragma: no-cache":"public",
+ edate);
+ }
+
+ // copy a possibly available custom header
+ if(unlikely(buffer_strlen(w->response.header)))
+ buffer_strcat(w->response.header_output, buffer_tostring(w->response.header));
+
+ // headers related to the transfer method
+ if(likely(w->response.zoutput))
+ buffer_strcat(w->response.header_output, "Content-Encoding: gzip\r\n");
+
+ if(likely(w->flags & WEB_CLIENT_CHUNKED_TRANSFER))
+ buffer_strcat(w->response.header_output, "Transfer-Encoding: chunked\r\n");
+ else {
+ if(likely((w->response.data->len || w->response.rlen))) {
+ // we know the content length, put it
+ buffer_sprintf(w->response.header_output, "Content-Length: %zu\r\n", w->response.data->len? w->response.data->len: w->response.rlen);
+ }
+ else {
+ // we don't know the content length, disable keep-alive
+ web_client_disable_keepalive(w);
+ }
+ }
+
+ // end of HTTP header
+ buffer_strcat(w->response.header_output, "\r\n");
+}
+
+static inline void web_client_send_http_header(struct web_client *w) {
+ web_client_build_http_header(w);
+
+ // send the HTTP header
+ debug(D_WEB_DATA, "%llu: Sending response HTTP header of size %zu: '%s'"
+ , w->id
+ , buffer_strlen(w->response.header_output)
+ , buffer_tostring(w->response.header_output)
+ );
+
+ web_client_crock_socket(w);
+
+ size_t count = 0;
+ ssize_t bytes;
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ( ( w->ssl.conn ) && ( !w->ssl.flags ) ){
+ while((bytes = SSL_write(w->ssl.conn, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output))) < 0) {
+ count++;
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTPS headers to web client.");
+ break;
+ }
+ }
+ } else {
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+ } else {
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+ }
+#else
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+#endif
+
+ if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) {
+ if(bytes > 0)
+ w->stats_sent_bytes += bytes;
+
+ error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
+ , buffer_strlen(w->response.header_output)
+ , bytes);
+
+ WEB_CLIENT_IS_DEAD(w);
+ return;
+ }
+ else
+ w->stats_sent_bytes += bytes;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url);
+
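+// Handles URLs of the form /host/<hostname|machine_guid>/...: the selected (possibly
+// archived) host replaces localhost and the remaining URL is processed again for it.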
+static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t hash_localhost = 0;
+
+ if(unlikely(!hash_localhost)) {
+ hash_localhost = simple_hash("localhost");
+ }
+
+ if(host != localhost) {
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Nesting of hosts is not allowed.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ char *tok = mystrsep(&url, "/");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for host with name '%s'.", w->id, tok);
+
+ if(!url) { //no delim found
+ debug(D_WEB_CLIENT, "%llu: URL doesn't end with '/', generating a redirect.", w->id);
+ char *protocol, *url_host;
+#ifdef ENABLE_HTTPS
+ protocol = ((w->ssl.conn && !w->ssl.flags) || w->ssl.flags & NETDATA_SSL_PROXY_HTTPS) ? "https" : "http";
+#else
+ protocol = "http";
+#endif
+ url_host = (!w->forwarded_host[0])?w->server_host:w->forwarded_host;
+ buffer_sprintf(w->response.header, "Location: %s://%s%s/\r\n", protocol, url_host, w->last_url);
+ buffer_strcat(w->response.data, "Permanent redirect");
+ return HTTP_RESP_REDIR_PERM;
+ }
+
+ // copy the URL, we need it to serve files
+ w->last_url[0] = '/';
+
+ if(url && *url) strncpyz(&w->last_url[1], url, NETDATA_WEB_REQUEST_URL_SIZE - 1);
+ else w->last_url[1] = '\0';
+
+ uint32_t hash = simple_hash(tok);
+
+ host = rrdhost_find_by_hostname(tok, hash);
+ if(!host) host = rrdhost_find_by_guid(tok, hash);
+
+#ifdef ENABLE_DBENGINE
+ int release_host = 0;
+ if (!host) {
+ host = sql_create_host_by_uuid(tok);
+ if (likely(host)) {
+ rrdhost_flag_set(host, RRDHOST_FLAG_ARCHIVED);
+ release_host = 1;
+ }
+ }
+ if(host) {
+ int rc = web_client_process_url(host, w, url);
+ if (release_host) {
+ freez(host->hostname);
+ freez((char *) host->os);
+ freez((char *) host->tags);
+ freez((char *) host->timezone);
+ freez(host->program_name);
+ freez(host->program_version);
+ freez(host->registry_hostname);
+ freez(host);
+ }
+ return rc;
+ }
+#else
+ if (host) return web_client_process_url(host, w, url);
+#endif
+ }
+
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "This netdata does not maintain a database for host: ");
+ buffer_strcat_htmlescape(w->response.data, tok?tok:"");
+ return HTTP_RESP_NOT_FOUND;
+}
+
+static inline int web_client_process_url(RRDHOST *host, struct web_client *w, char *url) {
+ static uint32_t
+ hash_api = 0,
+ hash_netdata_conf = 0,
+ hash_host = 0;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ static uint32_t hash_exit = 0, hash_debug = 0, hash_mirror = 0;
+#endif
+
+ if(unlikely(!hash_api)) {
+ hash_api = simple_hash("api");
+ hash_netdata_conf = simple_hash("netdata.conf");
+ hash_host = simple_hash("host");
+#ifdef NETDATA_INTERNAL_CHECKS
+ hash_exit = simple_hash("exit");
+ hash_debug = simple_hash("debug");
+ hash_mirror = simple_hash("mirror");
+#endif
+ }
+
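+ // the first path component selects the handler: /api, /host, /netdata.conf
+ // (plus /exit, /debug and /mirror on internal builds); anything else is served as a static file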
+ char *tok = mystrsep(&url, "/?");
+ if(likely(tok && *tok)) {
+ uint32_t hash = simple_hash(tok);
+ debug(D_WEB_CLIENT, "%llu: Processing command '%s'.", w->id, tok);
+
+ if(unlikely(hash == hash_api && strcmp(tok, "api") == 0)) { // current API
+ debug(D_WEB_CLIENT_ACCESS, "%llu: API request ...", w->id);
+ return check_host_and_call(host, w, url, web_client_api_request);
+ }
+ else if(unlikely(hash == hash_host && strcmp(tok, "host") == 0)) { // host switching
+ debug(D_WEB_CLIENT_ACCESS, "%llu: host switch request ...", w->id);
+ return web_client_switch_host(host, w, url);
+ }
+ else if(unlikely(hash == hash_netdata_conf && strcmp(tok, "netdata.conf") == 0)) { // netdata.conf
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: generating netdata.conf ...", w->id);
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ config_generate(w->response.data, 0);
+ return HTTP_RESP_OK;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ else if(unlikely(hash == hash_exit && strcmp(tok, "exit") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+
+ if(!netdata_exit)
+ buffer_strcat(w->response.data, "ok, will do...");
+ else
+ buffer_strcat(w->response.data, "I am doing it already");
+
+ error("web request to exit received.");
+ netdata_cleanup_and_exit(0);
+ return HTTP_RESP_OK;
+ }
+ else if(unlikely(hash == hash_debug && strcmp(tok, "debug") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ buffer_flush(w->response.data);
+
+ // get the name of the data to show
+ tok = mystrsep(&url, "&");
+ if(tok && *tok) {
+ debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok);
+
+ // do we have such a data set?
+ RRDSET *st = rrdset_find_byname(host, tok);
+ if(!st) st = rrdset_find(host, tok);
+ if(!st) {
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data, "Chart not found: ");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: %s is not found.", w->id, tok);
+ return HTTP_RESP_NOT_FOUND;
+ }
+
+ debug_flags |= D_RRD_STATS;
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))
+ rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);
+ else
+ rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_sprintf(w->response.data, "Chart has now debug %s: ", rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ buffer_strcat_htmlescape(w->response.data, tok);
+ debug(D_WEB_CLIENT_ACCESS, "%llu: debug for %s is %s.", w->id, tok, rrdset_flag_check(st, RRDSET_FLAG_DEBUG)?"enabled":"disabled");
+ return HTTP_RESP_OK;
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "debug which chart?\r\n");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ else if(unlikely(hash == hash_mirror && strcmp(tok, "mirror") == 0)) {
+ if(unlikely(!web_client_can_access_netdataconf(w)))
+ return web_client_permission_denied(w);
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Mirroring...", w->id);
+
+ // replace the zero bytes with spaces
+ buffer_char_replace(w->response.data, '\0', ' ');
+
+ // just leave the buffer as is
+ // it will be copied back to the client
+
+ return HTTP_RESP_OK;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ char filename[FILENAME_MAX+1];
+ url = filename;
+ strncpyz(filename, w->last_url, FILENAME_MAX);
+ tok = mystrsep(&url, "?");
+ buffer_flush(w->response.data);
+ return mysendfile(w, (tok && *tok)?tok:"/");
+}
+
+void web_client_process_request(struct web_client *w) {
+
+ // start timing us
+ now_realtime_timeval(&w->tv_in);
+
+ switch(http_request_validate(w)) {
+ case HTTP_VALIDATION_OK:
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_STREAM:
+ if(unlikely(!web_client_can_access_stream(w))) {
+ web_client_permission_denied(w);
+ return;
+ }
+
+ w->response.code = rrdpush_receiver_thread_spawn(w, w->decoded_url);
+ return;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ if(unlikely(
+ !web_client_can_access_dashboard(w) &&
+ !web_client_can_access_registry(w) &&
+ !web_client_can_access_badges(w) &&
+ !web_client_can_access_mgmt(w) &&
+ !web_client_can_access_netdataconf(w)
+ )) {
+ web_client_permission_denied(w);
+ break;
+ }
+
+ w->response.data->contenttype = CT_TEXT_PLAIN;
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "OK");
+ w->response.code = HTTP_RESP_OK;
+ break;
+
+ case WEB_CLIENT_MODE_FILECOPY:
+ case WEB_CLIENT_MODE_NORMAL:
+ if(unlikely(
+ !web_client_can_access_dashboard(w) &&
+ !web_client_can_access_registry(w) &&
+ !web_client_can_access_badges(w) &&
+ !web_client_can_access_mgmt(w) &&
+ !web_client_can_access_netdataconf(w)
+ )) {
+ web_client_permission_denied(w);
+ break;
+ }
+
+ w->response.code = web_client_process_url(localhost, w, w->decoded_url);
+ break;
+ }
+ break;
+
+ case HTTP_VALIDATION_INCOMPLETE:
+ if(w->response.data->len > NETDATA_WEB_REQUEST_MAX_SIZE) {
+ strcpy(w->last_url, "too big request");
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Received request is too big (%zu bytes).", w->id, w->response.data->len);
+
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Received request is too big (%zu bytes).\r\n", w->response.data->len);
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ }
+ else {
+ // wait for more data
+ return;
+ }
+ break;
+#ifdef ENABLE_HTTPS
+ case HTTP_VALIDATION_REDIRECT:
+ {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_TEXT_HTML;
+ buffer_strcat(w->response.data,
+ "<!DOCTYPE html><!-- SPDX-License-Identifier: GPL-3.0-or-later --><html>"
+ "<body onload=\"window.location.href ='https://'+ window.location.hostname +"
+ " ':' + window.location.port + window.location.pathname + window.location.search\">"
+ "Redirecting to a secure connection. If your browser does not support redirection, please"
+ " click <a onclick=\"window.location.href ='https://'+ window.location.hostname + ':' "
+ " + window.location.port + window.location.pathname + window.location.search\">here</a>."
+ "</body></html>");
+ w->response.code = HTTP_RESP_MOVED_PERM;
+ break;
+ }
+#endif
+ case HTTP_VALIDATION_MALFORMED_URL:
+ debug(D_WEB_CLIENT_ACCESS, "%llu: URL parsing failed (malformed URL). Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "URL not valid. I don't understand you...\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ case HTTP_VALIDATION_NOT_SUPPORTED:
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Cannot understand '%s'.", w->id, w->response.data->buffer);
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "I don't understand you...\r\n");
+ w->response.code = HTTP_RESP_BAD_REQUEST;
+ break;
+ }
+
+ // keep track of the time we finished processing
+ now_realtime_timeval(&w->tv_ready);
+
+ w->response.sent = 0;
+
+ // set a proper last modified date
+ if(unlikely(!w->response.data->date))
+ w->response.data->date = w->tv_ready.tv_sec;
+
+ web_client_send_http_header(w);
+
+ // enable sending immediately if we have data
+ if(w->response.data->len) web_client_enable_wait_send(w);
+ else web_client_disable_wait_send(w);
+
+ switch(w->mode) {
+ case WEB_CLIENT_MODE_STREAM:
+ debug(D_WEB_CLIENT, "%llu: STREAM done.", w->id);
+ break;
+
+ case WEB_CLIENT_MODE_OPTIONS:
+ debug(D_WEB_CLIENT, "%llu: Done preparing the OPTIONS response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case WEB_CLIENT_MODE_NORMAL:
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Sending data (%zu bytes) to client.", w->id, w->response.data->len);
+ break;
+
+ case WEB_CLIENT_MODE_FILECOPY:
+ if(w->response.rlen) {
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending data file of %zu bytes to client.", w->id, w->response.rlen);
+ web_client_enable_wait_receive(w);
+
+ /*
+ // utilize the kernel sendfile() for copying the file to the socket.
+ // this block of code can be commented, without anything missing.
+ // when it is commented, the program will copy the data using async I/O.
+ {
+ long len = sendfile(w->ofd, w->ifd, NULL, w->response.data->rbytes);
+ if(len != w->response.data->rbytes)
+ error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len);
+ else
+ web_client_request_done(w);
+ }
+ */
+ }
+ else
+ debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending an unknown amount of bytes to client.", w->id);
+ break;
+
+ default:
+ fatal("%llu: Unknown client mode %u.", w->id, w->mode);
+ break;
+ }
+}
+
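+// HTTP/1.1 chunked transfer encoding: every chunk is prefixed by its length in
+// hexadecimal followed by CRLF, and the stream is terminated by a zero-length chunk.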
+ssize_t web_client_send_chunk_header(struct web_client *w, size_t len)
+{
+ debug(D_DEFLATE, "%llu: OPEN CHUNK of %zu bytes (hex: %zx).", w->id, len, len);
+ char buf[24];
+ ssize_t bytes;
+ bytes = (ssize_t)sprintf(buf, "%zX\r\n", len);
+ buf[bytes] = 0x00;
+
+ bytes = web_client_send_data(w,buf,strlen(buf),0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk header %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk header to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk header to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_close(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: CLOSE CHUNK.", w->id);
+
+ ssize_t bytes;
+ bytes = web_client_send_data(w,"\r\n",2,0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk suffix to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+ssize_t web_client_send_chunk_finalize(struct web_client *w)
+{
+ //debug(D_DEFLATE, "%llu: FINALIZE CHUNK.", w->id);
+
+ ssize_t bytes;
+ bytes = web_client_send_data(w,"\r\n0\r\n\r\n",7,0);
+ if(bytes > 0) {
+ debug(D_DEFLATE, "%llu: Sent chunk suffix %zd bytes.", w->id, bytes);
+ w->stats_sent_bytes += bytes;
+ }
+
+ else if(bytes == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send chunk finalize suffix to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send chunk finalize suffix to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return bytes;
+}
+
+#ifdef NETDATA_WITH_ZLIB
+ssize_t web_client_send_deflate(struct web_client *w)
+{
+ ssize_t len = 0, t = 0;
+
+ // when using compression,
+ // w->response.sent is the amount of bytes passed through compression
+
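+ // the flow is: close any previously open chunk, pass the new response bytes through
+ // zlib, open a new chunk sized to the compressed output, then send it in pieces,
+ // tracking zhave (compressed bytes produced) vs zsent (compressed bytes already sent)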
+ debug(D_DEFLATE, "%llu: web_client_send_deflate(): w->response.data->len = %zu, w->response.sent = %zu, w->response.zhave = %zu, w->response.zsent = %zu, w->response.zstream.avail_in = %u, w->response.zstream.avail_out = %u, w->response.zstream.total_in = %lu, w->response.zstream.total_out = %lu.",
+ w->id, w->response.data->len, w->response.sent, w->response.zhave, w->response.zsent, w->response.zstream.avail_in, w->response.zstream.avail_out, w->response.zstream.total_in, w->response.zstream.total_out);
+
+ if(w->response.data->len - w->response.sent == 0 && w->response.zstream.avail_in == 0 && w->response.zhave == w->response.zsent && w->response.zstream.avail_out != 0) {
+ // there is nothing to send
+
+ debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // finalize the chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_finalize(w);
+ if(t < 0) return t;
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return t;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return t;
+ }
+
+ // reset the client
+ web_client_request_done(w);
+ debug(D_WEB_CLIENT, "%llu: Done sending all data on socket.", w->id);
+ return t;
+ }
+
+ if(w->response.zhave == w->response.zsent) {
+ // compress more input data
+
+ // close the previous open chunk
+ if(w->response.sent != 0) {
+ t = web_client_send_chunk_close(w);
+ if(t < 0) return t;
+ }
+
+ debug(D_DEFLATE, "%llu: Compressing %zu new bytes starting from %zu (and %u left behind).", w->id, (w->response.data->len - w->response.sent), w->response.sent, w->response.zstream.avail_in);
+
+ // give the compressor all the data not passed through the compressor yet
+ if(w->response.data->len > w->response.sent) {
+ w->response.zstream.next_in = (Bytef *)&w->response.data->buffer[w->response.sent - w->response.zstream.avail_in];
+ w->response.zstream.avail_in += (uInt) (w->response.data->len - w->response.sent);
+ }
+
+ // reset the compressor output buffer
+ w->response.zstream.next_out = w->response.zbuffer;
+ w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE;
+
+ // ask for FINISH if we have all the input
+ int flush = Z_SYNC_FLUSH;
+ if(w->mode == WEB_CLIENT_MODE_NORMAL
+ || (w->mode == WEB_CLIENT_MODE_FILECOPY && !web_client_has_wait_receive(w) && w->response.data->len == w->response.rlen)) {
+ flush = Z_FINISH;
+ debug(D_DEFLATE, "%llu: Requesting Z_FINISH, if possible.", w->id);
+ }
+ else {
+ debug(D_DEFLATE, "%llu: Requesting Z_SYNC_FLUSH.", w->id);
+ }
+
+ // compress
+ if(deflate(&w->response.zstream, flush) == Z_STREAM_ERROR) {
+ error("%llu: Compression failed. Closing down client.", w->id);
+ web_client_request_done(w);
+ return(-1);
+ }
+
+ w->response.zhave = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE - w->response.zstream.avail_out;
+ w->response.zsent = 0;
+
+ // keep track of the bytes passed through the compressor
+ w->response.sent = w->response.data->len;
+
+ debug(D_DEFLATE, "%llu: Compression produced %zu bytes.", w->id, w->response.zhave);
+
+ // open a new chunk
+ ssize_t t2 = web_client_send_chunk_header(w, w->response.zhave);
+ if(t2 < 0) return t2;
+ t += t2;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: Sending %zu bytes of data (+%zd of chunk header).", w->id, w->response.zhave - w->response.zsent, t);
+
+ len = web_client_send_data(w,&w->response.zbuffer[w->response.zsent], (size_t) (w->response.zhave - w->response.zsent), MSG_DONTWAIT);
+ if(len > 0) {
+ w->stats_sent_bytes += len;
+ w->response.zsent += len;
+ len += t;
+ debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, len);
+ }
+ else if(len == 0) {
+ debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client (zhave = %zu, zsent = %zu, need to send = %zu).",
+ w->id, w->response.zhave, w->response.zsent, w->response.zhave - w->response.zsent);
+
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(len);
+}
+#endif // NETDATA_WITH_ZLIB
+
+ssize_t web_client_send(struct web_client *w) {
+#ifdef NETDATA_WITH_ZLIB
+ if(likely(w->response.zoutput)) return web_client_send_deflate(w);
+#endif // NETDATA_WITH_ZLIB
+
+ ssize_t bytes;
+
+ if(unlikely(w->response.data->len - w->response.sent == 0)) {
+ // there is nothing to send
+
+ debug(D_WEB_CLIENT, "%llu: Out of output data.", w->id);
+
+ // there can be two cases for this
+ // A. we have done everything
+ // B. we temporarily have nothing to send, waiting for the buffer to be filled by ifd
+
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
+ // we have to wait, more data will come
+ debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
+ web_client_disable_wait_send(w);
+ return 0;
+ }
+
+ if(unlikely(!web_client_has_keepalive(w))) {
+ debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
+ WEB_CLIENT_IS_DEAD(w);
+ return 0;
+ }
+
+ web_client_request_done(w);
+ debug(D_WEB_CLIENT, "%llu: Done sending all data on socket. Waiting for next request on the same socket.", w->id);
+ return 0;
+ }
+
+ bytes = web_client_send_data(w,&w->response.data->buffer[w->response.sent], w->response.data->len - w->response.sent, MSG_DONTWAIT);
+ if(likely(bytes > 0)) {
+ w->stats_sent_bytes += bytes;
+ w->response.sent += bytes;
+ debug(D_WEB_CLIENT, "%llu: Sent %zd bytes.", w->id, bytes);
+ }
+ else if(likely(bytes == 0)) {
+ debug(D_WEB_CLIENT, "%llu: Did not send any bytes to the client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: Failed to send data to client.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
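+// FILECOPY mode: w->ifd is the file being served and w->ofd is the client socket;
+// the file is read into the response buffer here and sent to the client asynchronously.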
+ssize_t web_client_read_file(struct web_client *w)
+{
+ if(unlikely(w->response.rlen > w->response.data->size))
+ buffer_need_bytes(w->response.data, w->response.rlen - w->response.data->size);
+
+ if(unlikely(w->response.rlen <= w->response.data->len))
+ return 0;
+
+ ssize_t left = w->response.rlen - w->response.data->len;
+ ssize_t bytes = read(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t)left);
+ if(likely(bytes > 0)) {
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ debug(D_WEB_CLIENT, "%llu: Read %zd bytes.", w->id, bytes);
+ debug(D_WEB_DATA, "%llu: Read data: '%s'.", w->id, &w->response.data->buffer[old]);
+
+ web_client_enable_wait_send(w);
+
+ if(w->response.rlen && w->response.data->len >= w->response.rlen)
+ web_client_disable_wait_receive(w);
+ }
+ else if(likely(bytes == 0)) {
+ debug(D_WEB_CLIENT, "%llu: Out of input file data.", w->id);
+
+ // if we cannot read, it means we have an error on input.
+ // if however, we are copying a file from ifd to ofd, we should not return an error.
+ // in this case, the error should be generated when the file has been sent to the client.
+
+ // we are copying data from ifd to ofd
+ // let it finish copying...
+ web_client_disable_wait_receive(w);
+
+ debug(D_WEB_CLIENT, "%llu: Read the whole file.", w->id);
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != w->ofd) close(w->ifd);
+ }
+
+ w->ifd = w->ofd;
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: read data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
+
+ssize_t web_client_receive(struct web_client *w)
+{
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY))
+ return web_client_read_file(w);
+
+ ssize_t bytes;
+
+ // make sure there is room for more data (and the terminating '\0')
+ buffer_need_bytes(w->response.data, NETDATA_WEB_REQUEST_RECEIVE_SIZE);
+
+ // compute the available space after the buffer has been grown
+ ssize_t left = w->response.data->size - w->response.data->len;
+
+#ifdef ENABLE_HTTPS
+ if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
+ if ( ( w->ssl.conn ) && (!w->ssl.flags)) {
+ bytes = SSL_read(w->ssl.conn, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1));
+ }else {
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ }
+ }
+ else{
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+ }
+#else
+ bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
+#endif
+
+ if(likely(bytes > 0)) {
+ w->stats_received_bytes += bytes;
+
+ size_t old = w->response.data->len;
+ (void)old;
+
+ w->response.data->len += bytes;
+ w->response.data->buffer[w->response.data->len] = '\0';
+
+ debug(D_WEB_CLIENT, "%llu: Received %zd bytes.", w->id, bytes);
+ debug(D_WEB_DATA, "%llu: Received data: '%s'.", w->id, &w->response.data->buffer[old]);
+ }
+ else {
+ debug(D_WEB_CLIENT, "%llu: receive data failed.", w->id);
+ WEB_CLIENT_IS_DEAD(w);
+ }
+
+ return(bytes);
+}
diff --git a/web/server/web_client.h b/web/server/web_client.h
new file mode 100644
index 0000000..48bf1ac
--- /dev/null
+++ b/web/server/web_client.h
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_H
+#define NETDATA_WEB_CLIENT_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#ifdef NETDATA_WITH_ZLIB
+extern int web_enable_gzip, web_gzip_level, web_gzip_strategy;
+#endif /* NETDATA_WITH_ZLIB */
+
+// HTTP_CODES 2XX Success
+#define HTTP_RESP_OK 200
+
+// HTTP_CODES 3XX Redirections
+#define HTTP_RESP_MOVED_PERM 301
+#define HTTP_RESP_REDIR_TEMP 307
+#define HTTP_RESP_REDIR_PERM 308
+
+// HTTP_CODES 4XX Client Errors
+#define HTTP_RESP_BAD_REQUEST 400
+#define HTTP_RESP_FORBIDDEN 403
+#define HTTP_RESP_NOT_FOUND 404
+#define HTTP_RESP_PRECOND_FAIL 412
+
+// HTTP_CODES 5XX Server Errors
+#define HTTP_RESP_INTERNAL_SERVER_ERROR 500
+#define HTTP_RESP_BACKEND_FETCH_FAILED 503
+
+extern int respect_web_browser_do_not_track_policy;
+extern char *web_x_frame_options;
+
+typedef enum web_client_mode {
+ WEB_CLIENT_MODE_NORMAL = 0,
+ WEB_CLIENT_MODE_FILECOPY = 1,
+ WEB_CLIENT_MODE_OPTIONS = 2,
+ WEB_CLIENT_MODE_STREAM = 3
+} WEB_CLIENT_MODE;
+
+typedef enum {
+ HTTP_VALIDATION_OK,
+ HTTP_VALIDATION_NOT_SUPPORTED,
+ HTTP_VALIDATION_MALFORMED_URL,
+#ifdef ENABLE_HTTPS
+ HTTP_VALIDATION_INCOMPLETE,
+ HTTP_VALIDATION_REDIRECT
+#else
+ HTTP_VALIDATION_INCOMPLETE
+#endif
+} HTTP_VALIDATION;
+
+typedef enum web_client_flags {
+ WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
+
+ WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used
+
+ WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting more input data
+ WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client
+
+ WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client
+ WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies
+
+ WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket
+ WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8, // if set, the client is using a UNIX socket
+
+ WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = 1 << 9, // don't close the socket when cleaning up (static-threaded web server)
+
+ WEB_CLIENT_CHUNKED_TRANSFER = 1 << 10, // chunked transfer (used with zlib compression)
+} WEB_CLIENT_FLAGS;
+
+//#ifdef HAVE_C___ATOMIC
+//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
+//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
+//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
+//#else
+#define web_client_flag_check(w, flag) ((w)->flags & (flag))
+#define web_client_flag_set(w, flag) (w)->flags |= flag
+#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
+//#endif
+
+#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
+#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
+
+#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
+
+#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+
+#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+
+#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+
+#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
+
+#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
+#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
+#define NETDATA_WEB_REQUEST_URL_SIZE 8192
+#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384
+#define NETDATA_WEB_RESPONSE_HEADER_SIZE 4096
+#define NETDATA_WEB_REQUEST_COOKIE_SIZE 1024
+#define NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE 1024
+#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 16384
+#define NETDATA_WEB_REQUEST_RECEIVE_SIZE 16384
+#define NETDATA_WEB_REQUEST_MAX_SIZE 16384
+
+struct response {
+ BUFFER *header; // our response header
+ BUFFER *header_output; // internal use
+ BUFFER *data; // our response data buffer
+
+ int code; // the HTTP response code
+
+ size_t rlen; // if non-zero, the expected size of ifd (input of filecopy)
+ size_t sent; // current data length sent to output
+
+ int zoutput; // if set to 1, web_client_send() will send compressed data
+#ifdef NETDATA_WITH_ZLIB
+ z_stream zstream; // zlib stream for sending compressed output to client
+ Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
+ size_t zsent; // the compressed bytes we have sent to the client
+ size_t zhave; // the compressed bytes that we have received from zlib
+ unsigned int zinitialized : 1;
+#endif /* NETDATA_WITH_ZLIB */
+};
+
+struct web_client {
+ unsigned long long id;
+
+ WEB_CLIENT_FLAGS flags; // status flags for the client
+ WEB_CLIENT_MODE mode; // the operational mode of the client
+ WEB_CLIENT_ACL acl; // the access list of the client
+ int port_acl; // the operations permitted on the port the client connected to
+ char *auth_bearer_token; // the Bearer auth token (if sent)
+ size_t header_parse_tries;
+ size_t header_parse_last_size;
+
+ int tcp_cork; // 1 = we have a cork on the socket
+
+ int ifd;
+ int ofd;
+
+ char client_ip[INET6_ADDRSTRLEN]; // Defined buffer sizes include null-terminators
+ char client_port[NI_MAXSERV];
+ char server_host[NI_MAXHOST];
+ char client_host[NI_MAXHOST];
+ char forwarded_host[NI_MAXHOST]; //Used with proxy
+
+ char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the URL in this buffer
+ char decoded_query_string[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the Query String in this buffer
+ char last_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we keep a copy of the decoded URL here
+ size_t url_path_length;
+ char separator; // this is either '?' or '%', or 0 when there is no query string
+ char *url_search_path; //A pointer to the search path sent by the client
+
+ struct timeval tv_in, tv_ready;
+
+ char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE + 1];
+ char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE + 1];
+ char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE + 1];
+ char *user_agent;
+
+ struct response response;
+
+ size_t stats_received_bytes;
+ size_t stats_sent_bytes;
+
+ // cache of web_client allocations
+ struct web_client *prev; // maintain a linked list of web clients
+ struct web_client *next; // for the web servers that need it
+
+ // MULTI-THREADED WEB SERVER MEMBERS
+ netdata_thread_t thread; // the thread servicing this client
+ volatile int running; // 1 when the thread runs, 0 otherwise
+
+ // STATIC-THREADED WEB SERVER MEMBERS
+ size_t pollinfo_slot; // POLLINFO slot of the web client
+ size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
+#ifdef ENABLE_HTTPS
+ struct netdata_ssl ssl;
+#endif
+};
+
+extern uid_t web_files_uid(void);
+extern uid_t web_files_gid(void);
+
+extern int web_client_permission_denied(struct web_client *w);
+
+extern ssize_t web_client_send(struct web_client *w);
+extern ssize_t web_client_receive(struct web_client *w);
+extern ssize_t web_client_read_file(struct web_client *w);
+
+extern void web_client_process_request(struct web_client *w);
+extern void web_client_request_done(struct web_client *w);
+
+extern void buffer_data_options2string(BUFFER *wb, uint32_t options);
+
+extern int mysendfile(struct web_client *w, char *filename);
+
+extern void web_client_build_http_header(struct web_client *w);
+
+#include "daemon/common.h"
+
+#endif
diff --git a/web/server/web_client_cache.c b/web/server/web_client_cache.c
new file mode 100644
index 0000000..afd51d8
--- /dev/null
+++ b/web/server/web_client_cache.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_client_cache.h"
+
+// ----------------------------------------------------------------------------
+// allocate and free web_clients
+
+#ifdef ENABLE_HTTPS
+
+static void web_client_reuse_ssl(struct web_client *w) {
+ if (netdata_srv_ctx) {
+ if (w->ssl.conn) {
+ SSL_clear(w->ssl.conn);
+ }
+ }
+}
+#endif
+
+
+static void web_client_zero(struct web_client *w) {
+ // zero everything about it - but keep the buffers
+
+ // remember the pointers to the buffers
+ BUFFER *b1 = w->response.data;
+ BUFFER *b2 = w->response.header;
+ BUFFER *b3 = w->response.header_output;
+
+ // empty the buffers
+ buffer_flush(b1);
+ buffer_flush(b2);
+ buffer_flush(b3);
+
+ freez(w->user_agent);
+
+ // zero everything
+ memset(w, 0, sizeof(struct web_client));
+
+ // restore the pointers of the buffers
+ w->response.data = b1;
+ w->response.header = b2;
+ w->response.header_output = b3;
+}
+
+static void web_client_free(struct web_client *w) {
+ buffer_free(w->response.header_output);
+ buffer_free(w->response.header);
+ buffer_free(w->response.data);
+ freez(w->user_agent);
+#ifdef ENABLE_HTTPS
+ if ((!web_client_check_unix(w)) && ( netdata_srv_ctx )) {
+ if (w->ssl.conn) {
+ SSL_free(w->ssl.conn);
+ w->ssl.conn = NULL;
+ }
+ }
+#endif
+ freez(w);
+}
+
+static struct web_client *web_client_alloc(void) {
+ struct web_client *w = callocz(1, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ return w;
+}
+
+// ----------------------------------------------------------------------------
+// web clients caching
+
+// When clients connect and disconnect, avoid allocating and releasing memory.
+// Instead, when new clients get connected, reuse any memory previously allocated
+// for serving web clients that are now disconnected.
+
+// The size of the cache is adaptive. It caches the structures of 2x
+// the number of currently connected clients.
+
+// Comments per server:
+// SINGLE-THREADED : 1 cache is maintained
+// MULTI-THREADED : 1 cache is maintained
+// STATIC-THREADED : 1 cache for each thread of the web server
+
+__thread struct clients_cache web_clients_cache = {
+ .pid = 0,
+ .used = NULL,
+ .used_count = 0,
+ .avail = NULL,
+ .avail_count = 0,
+ .allocated = 0,
+ .reused = 0
+};
+
+inline void web_client_cache_verify(int force) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ static __thread size_t count = 0;
+ count++;
+
+ if(unlikely(force || count > 1000)) {
+ count = 0;
+
+ struct web_client *w;
+ size_t used = 0, avail = 0;
+ for(w = web_clients_cache.used; w ; w = w->next) used++;
+ for(w = web_clients_cache.avail; w ; w = w->next) avail++;
+
+ info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , used, web_clients_cache.used_count
+ , avail, web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#else
+ if(unlikely(force)) {
+ info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , web_clients_cache.used_count
+ , web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#endif
+}
+
+// destroy the cache and free all the memory it uses
+void web_client_cache_destroy(void) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ web_client_cache_verify(1);
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w, *t;
+
+ w = web_clients_cache.used;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.used = NULL;
+ web_clients_cache.used_count = 0;
+
+ w = web_clients_cache.avail;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.avail = NULL;
+ web_clients_cache.avail_count = 0;
+
+ netdata_thread_enable_cancelability();
+}
+
+struct web_client *web_client_get_from_cache_or_allocate() {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid == 0))
+ web_clients_cache.pid = gettid();
+
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w = web_clients_cache.avail;
+
+ if(w) {
+ // get it from avail
+ if (w == web_clients_cache.avail) web_clients_cache.avail = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.avail_count--;
+#ifdef ENABLE_HTTPS
+ web_client_reuse_ssl(w);
+ SSL *ssl = w->ssl.conn;
+#endif
+ web_client_zero(w);
+ web_clients_cache.reused++;
+#ifdef ENABLE_HTTPS
+ w->ssl.conn = ssl;
+ w->ssl.flags = NETDATA_SSL_START;
+ debug(D_WEB_CLIENT_ACCESS,"Reusing SSL structure with (w->ssl = NULL, w->accepted = %u)", w->ssl.flags);
+#endif
+ }
+ else {
+ // allocate it
+ w = web_client_alloc();
+#ifdef ENABLE_HTTPS
+ w->ssl.flags = NETDATA_SSL_START;
+ debug(D_WEB_CLIENT_ACCESS,"Starting SSL structure with (w->ssl = NULL, w->accepted = %u)", w->ssl.flags);
+#endif
+ web_clients_cache.allocated++;
+ }
+
+ // link it to used web clients
+ if (web_clients_cache.used) web_clients_cache.used->prev = w;
+ w->next = web_clients_cache.used;
+ w->prev = NULL;
+ web_clients_cache.used = w;
+ web_clients_cache.used_count++;
+
+ // initialize it
+ w->id = web_client_connected();
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ netdata_thread_enable_cancelability();
+
+ return w;
+}
+
+void web_client_release(struct web_client *w) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ if(unlikely(w->running))
+ error("%llu: releasing web client from %s port %s, but it is still running.", w->id, w->client_ip, w->client_port);
+#endif
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port);
+
+ web_server_log_connection(w, "DISCONNECTED");
+ web_client_request_done(w);
+ web_client_disconnected();
+
+ netdata_thread_disable_cancelability();
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1) close(w->ifd);
+ if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd);
+ w->ifd = w->ofd = -1;
+#ifdef ENABLE_HTTPS
+ web_client_reuse_ssl(w);
+ w->ssl.flags = NETDATA_SSL_START;
+#endif
+
+ }
+
+ // unlink it from the used
+ if (w == web_clients_cache.used) web_clients_cache.used = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.used_count--;
+
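+ // adaptive cache size: keep at most 2x the number of currently connected clients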
+ if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) {
+ // we have too many of them - free it
+ web_client_free(w);
+ }
+ else {
+ // link it to the avail
+ if (web_clients_cache.avail) web_clients_cache.avail->prev = w;
+ w->next = web_clients_cache.avail;
+ w->prev = NULL;
+ web_clients_cache.avail = w;
+ web_clients_cache.avail_count++;
+ }
+
+ netdata_thread_enable_cancelability();
+}
+
diff --git a/web/server/web_client_cache.h b/web/server/web_client_cache.h
new file mode 100644
index 0000000..f638880
--- /dev/null
+++ b/web/server/web_client_cache.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_CACHE_H
+#define NETDATA_WEB_CLIENT_CACHE_H
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+struct clients_cache {
+ pid_t pid;
+
+ struct web_client *used; // the structures of the currently connected clients
+ size_t used_count; // the number of currently connected clients
+
+ struct web_client *avail; // the cached structures, available for future clients
+ size_t avail_count; // the number of cached structures
+
+ size_t reused; // the number of re-uses
+ size_t allocated; // the number of allocations
+};
+
+extern __thread struct clients_cache web_clients_cache;
+
+extern void web_client_release(struct web_client *w);
+extern struct web_client *web_client_get_from_cache_or_allocate();
+extern void web_client_cache_destroy(void);
+extern void web_client_cache_verify(int force);
+
+#include "web_server.h"
+
+#endif //NETDATA_WEB_CLIENT_CACHE_H
diff --git a/web/server/web_server.c b/web/server/web_server.c
new file mode 100644
index 0000000..4da08d4
--- /dev/null
+++ b/web/server/web_server.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_server.h"
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+// --------------------------------------------------------------------------------------
+
+WEB_SERVER_MODE web_server_mode_id(const char *mode) {
+ if(!strcmp(mode, "none"))
+ return WEB_SERVER_MODE_NONE;
+ else
+ return WEB_SERVER_MODE_STATIC_THREADED;
+
+}
+
+const char *web_server_mode_name(WEB_SERVER_MODE id) {
+ switch(id) {
+ case WEB_SERVER_MODE_NONE:
+ return "none";
+ default:
+ case WEB_SERVER_MODE_STATIC_THREADED:
+ return "static-threaded";
+ }
+}
+
+// --------------------------------------------------------------------------------------
+// API sockets
+
+LISTEN_SOCKETS api_sockets = {
+ .config = &netdata_config,
+ .config_section = CONFIG_SECTION_WEB,
+ .default_bind_to = "*",
+ .default_port = API_LISTEN_PORT,
+ .backlog = API_LISTEN_BACKLOG
+};
+
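+// log the name and the ACL flags of every listening API socket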
+void debug_sockets() {
+ BUFFER *wb = buffer_create(256 * sizeof(char));
+ int i;
+
+ for(i = 0 ; i < (int)api_sockets.opened ; i++) {
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_NOCHECK)?"NONE ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_DASHBOARD)?"dashboard ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_REGISTRY)?"registry ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_BADGE)?"badges ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_MGMT)?"management ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_STREAMING)?"streaming ":"");
+ buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_NETDATACONF)?"netdata.conf ":"");
+ debug(D_WEB_CLIENT, "Socket fd %d name '%s' acl_flags: %s",
+ i,
+ api_sockets.fds_names[i],
+ buffer_tostring(wb));
+ buffer_reset(wb);
+ }
+ buffer_free(wb);
+}
+
+void api_listen_sockets_setup(void) {
+ int socks = listen_sockets_setup(&api_sockets);
+
+ if(!socks)
+ fatal("LISTENER: Cannot listen on any API socket. Exiting...");
+
+ if(unlikely(debug_flags & D_WEB_CLIENT))
+ debug_sockets();
+
+ return;
+}
+
+
+// --------------------------------------------------------------------------------------
+// access lists
+
+SIMPLE_PATTERN *web_allow_connections_from = NULL;
+int web_allow_connections_dns;
+
+// WEB_CLIENT_ACL
+SIMPLE_PATTERN *web_allow_dashboard_from = NULL;
+int web_allow_dashboard_dns;
+SIMPLE_PATTERN *web_allow_registry_from = NULL;
+int web_allow_registry_dns;
+SIMPLE_PATTERN *web_allow_badges_from = NULL;
+int web_allow_badges_dns;
+SIMPLE_PATTERN *web_allow_mgmt_from = NULL;
+int web_allow_mgmt_dns;
+SIMPLE_PATTERN *web_allow_streaming_from = NULL;
+int web_allow_streaming_dns;
+SIMPLE_PATTERN *web_allow_netdataconf_from = NULL;
+int web_allow_netdataconf_dns;
+
+void web_client_update_acl_matches(struct web_client *w) {
+ w->acl = WEB_CLIENT_ACL_NONE;
+
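+    // grant each API category when no allow-list is configured for it, or when the client's IP (or resolved host name) matches the configured pattern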
+ if (!web_allow_dashboard_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_dashboard_from, "dashboard", web_allow_dashboard_dns))
+ w->acl |= WEB_CLIENT_ACL_DASHBOARD;
+
+ if (!web_allow_registry_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_registry_from, "registry", web_allow_registry_dns))
+ w->acl |= WEB_CLIENT_ACL_REGISTRY;
+
+ if (!web_allow_badges_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_badges_from, "badges", web_allow_badges_dns))
+ w->acl |= WEB_CLIENT_ACL_BADGE;
+
+ if (!web_allow_mgmt_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_mgmt_from, "management", web_allow_mgmt_dns))
+ w->acl |= WEB_CLIENT_ACL_MGMT;
+
+ if (!web_allow_streaming_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_streaming_from, "streaming", web_allow_streaming_dns))
+ w->acl |= WEB_CLIENT_ACL_STREAMING;
+
+ if (!web_allow_netdataconf_from ||
+ connection_allowed(w->ifd, w->client_ip, w->client_host, sizeof(w->client_host),
+ web_allow_netdataconf_from, "netdata.conf", web_allow_netdataconf_dns))
+ w->acl |= WEB_CLIENT_ACL_NETDATACONF;
+
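+    // finally, restrict the matches to whatever the listening port itself allows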
+ w->acl &= w->port_acl;
+}
+
+
+// --------------------------------------------------------------------------------------
+
+void web_server_log_connection(struct web_client *w, const char *msg) {
+ log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg);
+}
+
+// --------------------------------------------------------------------------------------
+
+void web_client_initialize_connection(struct web_client *w) {
+ int flag = 1;
+
+ if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd);
+
+ flag = 1;
+ if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd);
+
+ web_client_update_acl_matches(w);
+
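+    // reset per-request fields, since this structure may be a reused one from the client cache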
+ w->origin[0] = '*'; w->origin[1] = '\0';
+ w->cookie1[0] = '\0'; w->cookie2[0] = '\0';
+ freez(w->user_agent); w->user_agent = NULL;
+
+ web_client_enable_wait_receive(w);
+
+ web_server_log_connection(w, "CONNECTED");
+
+ web_client_cache_verify(0);
+}
diff --git a/web/server/web_server.h b/web/server/web_server.h
new file mode 100644
index 0000000..7bfc81c
--- /dev/null
+++ b/web/server/web_server.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_H
+#define NETDATA_WEB_SERVER_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "web_client.h"
+
+#ifndef API_LISTEN_PORT
+#define API_LISTEN_PORT 19999
+#endif
+
+#ifndef API_LISTEN_BACKLOG
+#define API_LISTEN_BACKLOG 4096
+#endif
+
+typedef enum web_server_mode {
+ WEB_SERVER_MODE_STATIC_THREADED,
+ WEB_SERVER_MODE_NONE
+} WEB_SERVER_MODE;
+
+extern SIMPLE_PATTERN *web_allow_connections_from;
+extern int web_allow_connections_dns;
+extern SIMPLE_PATTERN *web_allow_dashboard_from;
+extern int web_allow_dashboard_dns;
+extern SIMPLE_PATTERN *web_allow_registry_from;
+extern int web_allow_registry_dns;
+extern SIMPLE_PATTERN *web_allow_badges_from;
+extern int web_allow_badges_dns;
+extern SIMPLE_PATTERN *web_allow_streaming_from;
+extern int web_allow_streaming_dns;
+extern SIMPLE_PATTERN *web_allow_netdataconf_from;
+extern int web_allow_netdataconf_dns;
+extern SIMPLE_PATTERN *web_allow_mgmt_from;
+extern int web_allow_mgmt_dns;
+
+extern WEB_SERVER_MODE web_server_mode;
+
+extern WEB_SERVER_MODE web_server_mode_id(const char *mode);
+extern const char *web_server_mode_name(WEB_SERVER_MODE id);
+
+extern void api_listen_sockets_setup(void);
+
+#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60
+#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60
+extern int web_client_timeout;
+extern int web_client_first_request_timeout;
+extern long web_client_streaming_rate_t;
+
+#ifdef WEB_SERVER_INTERNALS
+extern LISTEN_SOCKETS api_sockets;
+extern void web_client_update_acl_matches(struct web_client *w);
+extern void web_server_log_connection(struct web_client *w, const char *msg);
+extern void web_client_initialize_connection(struct web_client *w);
+extern struct web_client *web_client_create_on_listenfd(int listener);
+
+#include "web_client_cache.h"
+#endif // WEB_SERVER_INTERNALS
+
+#include "static/static-threaded.h"
+
+#include "daemon/common.h"
+
+#endif /* NETDATA_WEB_SERVER_H */