author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 14:31:17 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 14:31:17 +0000
commit    8020f71afd34d7696d7933659df2d763ab05542f (patch)
tree      2fdf1b5447ffd8bdd61e702ca183e814afdcb4fc /libnetdata/onewayalloc
parent    Initial commit. (diff)
Adding upstream version 1.37.1. (upstream/1.37.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'libnetdata/onewayalloc')
-rw-r--r--  libnetdata/onewayalloc/Makefile.am    |   8
-rw-r--r--  libnetdata/onewayalloc/README.md      |  71
-rw-r--r--  libnetdata/onewayalloc/onewayalloc.c  | 193
-rw-r--r--  libnetdata/onewayalloc/onewayalloc.h  |  19
4 files changed, 291 insertions, 0 deletions
diff --git a/libnetdata/onewayalloc/Makefile.am b/libnetdata/onewayalloc/Makefile.am
new file mode 100644
index 0000000..161784b
--- /dev/null
+++ b/libnetdata/onewayalloc/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/onewayalloc/README.md b/libnetdata/onewayalloc/README.md
new file mode 100644
index 0000000..1f459c2
--- /dev/null
+++ b/libnetdata/onewayalloc/README.md
@@ -0,0 +1,71 @@
+<!--
+title: "One Way Allocator"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/onewayallocator/README.md
+-->
+
+# One Way Allocator
+
+This is a very fast, single-threaded-only memory allocator that minimizes system calls
+when a task needs to make many memory allocations, all of which can be freed together
+once the task finishes.
+
+It has been designed to be used for netdata context queries.
+
+To perform a context query, netdata builds a virtual chart: a chart that contains
+all the dimensions of the charts sharing the same context. This process requires allocating
+several structures for each of these dimensions in order to attach them to the virtual chart.
+All of this data can be freed immediately after the query finishes.
+
+## How it works
+
+1. The caller calls `ONEWAYALLOC *owa = onewayalloc_create(sizehint)` to create an OWA.
+   Internally this allocates the first memory buffer with a size >= `sizehint`.
+   If `sizehint` is zero, it allocates 1 hardware page (usually 4kB).
+   There is no need to check for success or failure. As with `mallocz()` in netdata, a `fatal()`
+   will be called if the allocation fails - although this should never fail, since Linux
+   does not really check whether memory is available for `mmap()` calls.
+
+2. The caller can then perform any number of the following calls to acquire memory:
+ - `onewayalloc_mallocz(owa, size)`, similar to `mallocz()`
+ - `onewayalloc_callocz(owa, nmemb, size)`, similar to `callocz()`
+ - `onewayalloc_strdupz(owa, string)`, similar to `strdupz()`
+ - `onewayalloc_memdupz(owa, ptr, size)`, similar to `mallocz()` and then `memcpy()`
+
+3. Once the caller is done working with the allocated buffers, all the memory allocated
+   can be freed at once with `onewayalloc_destroy(owa)`, as shown in the sketch below.
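+
+As a rough illustration, here is a minimal usage sketch. The variable names and sizes below
+are made up for this example; only the `onewayalloc_*()` calls are part of the actual API:
+
+```c
+// create the arena; a size hint of 0 means "start with one hardware page"
+ONEWAYALLOC *owa = onewayalloc_create(0);
+
+// any number of allocations, all served from the same arena
+char   *name = onewayalloc_strdupz(owa, "cpu.user");
+double *data = onewayalloc_callocz(owa, 1024, sizeof(double));
+void   *copy = onewayalloc_memdupz(owa, data, 1024 * sizeof(double));
+
+// ... do the work that needs name, data and copy ...
+
+// free everything allocated on this OWA with a single call
+onewayalloc_destroy(owa);
+```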
+
+## How much faster is it?
+
+On modern hardware, for any single query the performance improvement is marginal and
+hardly noticeable.
+
+We performed the following tests, all using the same huge context query (1000 charts with
+100 dimensions each, i.e. 100k dimensions in total):
+
+1. using `mallocz()`, 1 caller, 256 queries (sequential)
+2. using `mallocz()`, 256 callers, 1 query each (parallel)
+3. using `OWA`, 1 caller, 256 queries (sequential)
+4. using `OWA`, 256 callers, 1 query each (parallel)
+
+Netdata was configured to use 24 web threads on the 24-core server we used.
+
+The results are as follows:
+
+### sequential test
+
+allocator|transactions|time to complete|transaction rate|average response time|min response time|max response time
+:---:|:---:|:---:|:---:|:---:|:---:|:---:
+`malloc()`|256|322.35s|0.79/sec|1.26s|1.01s|1.87s
+`OWA`|256|310.19s|0.83/sec|1.21s|1.04s|1.63s
+
+For a single query, the improvement is just marginal and not noticeable at all.
+
+### parallel test
+
+allocator|transactions|time to complete|transaction rate|average response time|min response time|max response time
+:---:|:---:|:---:|:---:|:---:|:---:|:---:
+`malloc()`|256|84.72s|3.02/sec|68.43s|50.20s|84.71s
+`OWA`|256|39.35s|6.51/sec|34.48s|20.55s|39.34s
+
+For parallel workloads, like the one executed by netdata.cloud, `OWA` provides a 54% overall
+speed improvement (more than doubling the overall user-experienced speed, including the data query itself).
diff --git a/libnetdata/onewayalloc/onewayalloc.c b/libnetdata/onewayalloc/onewayalloc.c
new file mode 100644
index 0000000..59c3b68
--- /dev/null
+++ b/libnetdata/onewayalloc/onewayalloc.c
@@ -0,0 +1,193 @@
+#include "onewayalloc.h"
+
+// https://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html
+#define OWA_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)
+
+typedef struct owa_page {
+ size_t stats_pages;
+ size_t stats_pages_size;
+ size_t stats_mallocs_made;
+ size_t stats_mallocs_size;
+ size_t size; // the total size of the page
+ size_t offset; // the first free byte of the page
+ struct owa_page *next; // the next page on the list
+ struct owa_page *last; // the last page on the list - we currently allocate on this
+} OWA_PAGE;
+
+// allocations need to be aligned to CPU register width
+// https://en.wikipedia.org/wiki/Data_structure_alignment
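+// e.g. with the usual 16-byte alignment (sizeof(uintptr_t) * 2 on 64-bit systems),
+// natural_alignment(10) returns 16 and natural_alignment(32) returns 32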
+static inline size_t natural_alignment(size_t size) {
+ if(unlikely(size % OWA_NATURAL_ALIGNMENT))
+ size = size + OWA_NATURAL_ALIGNMENT - (size % OWA_NATURAL_ALIGNMENT);
+
+ return size;
+}
+
+// Create an OWA
+// Once it is created, the caller may call onewayalloc_mallocz()
+// any number of times, for any amount of memory.
+
+static OWA_PAGE *onewayalloc_create_internal(OWA_PAGE *head, size_t size_hint) {
+ static size_t OWA_NATURAL_PAGE_SIZE = 0;
+
+ if(unlikely(!OWA_NATURAL_PAGE_SIZE)) {
+ long int page_size = sysconf(_SC_PAGE_SIZE);
+ if (unlikely(page_size == -1))
+ OWA_NATURAL_PAGE_SIZE = 4096;
+ else
+ OWA_NATURAL_PAGE_SIZE = page_size;
+ }
+
+ // our default page size
+ size_t size = OWA_NATURAL_PAGE_SIZE;
+
+ // make sure the new page will fit both the requested size
+ // and the OWA_PAGE structure at its beginning
+ size_hint += natural_alignment(sizeof(OWA_PAGE));
+
+ // prefer the user size if it is bigger than our size
+ if(size_hint > size) size = size_hint;
+
+ // try to allocate half of the total we have allocated already
+ if(likely(head)) {
+ size_t optimal_size = head->stats_pages_size / 2;
+ if(optimal_size > size) size = optimal_size;
+ }
+
+ // Make sure our allocations are always a multiple of the hardware page size
+ if(size % OWA_NATURAL_PAGE_SIZE) size = size + OWA_NATURAL_PAGE_SIZE - (size % OWA_NATURAL_PAGE_SIZE);
+
+ // OWA_PAGE *page = (OWA_PAGE *)netdata_mmap(NULL, size, MAP_ANONYMOUS|MAP_PRIVATE, 0);
+ // if(unlikely(!page)) fatal("Cannot allocate onewayalloc buffer of size %zu", size);
+ OWA_PAGE *page = (OWA_PAGE *)mallocz(size);
+
+ page->size = size;
+ page->offset = natural_alignment(sizeof(OWA_PAGE));
+ page->next = page->last = NULL;
+
+ if(unlikely(!head)) {
+ // this is the first time we are called
+ head = page;
+ head->stats_pages = 0;
+ head->stats_pages_size = 0;
+ head->stats_mallocs_made = 0;
+ head->stats_mallocs_size = 0;
+ }
+ else {
+ // link this page into our existing linked list
+ head->last->next = page;
+ }
+
+ head->last = page;
+ head->stats_pages++;
+ head->stats_pages_size += size;
+
+ return page;
+}
+
+ONEWAYALLOC *onewayalloc_create(size_t size_hint) {
+ return (ONEWAYALLOC *)onewayalloc_create_internal(NULL, size_hint);
+}
+
+void *onewayalloc_mallocz(ONEWAYALLOC *owa, size_t size) {
+ OWA_PAGE *head = (OWA_PAGE *)owa;
+ OWA_PAGE *page = head->last;
+
+ // update stats
+ head->stats_mallocs_made++;
+ head->stats_mallocs_size += size;
+
+ // make sure the size is aligned
+ size = natural_alignment(size);
+
+ if(unlikely(page->size - page->offset < size)) {
+ // we don't have enough space to fit the data
+ // let's get another page
+ page = onewayalloc_create_internal(head, (size > page->size)?size:page->size);
+ }
+
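+    // serve the request from the current page: hand out its first free byte
+    // and advance the offset (bump-pointer allocation, nothing tracked per allocation)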
+ char *mem = (char *)page;
+ mem = &mem[page->offset];
+ page->offset += size;
+
+ return (void *)mem;
+}
+
+void *onewayalloc_callocz(ONEWAYALLOC *owa, size_t nmemb, size_t size) {
+ size_t total = nmemb * size;
+ void *mem = onewayalloc_mallocz(owa, total);
+ memset(mem, 0, total);
+ return mem;
+}
+
+char *onewayalloc_strdupz(ONEWAYALLOC *owa, const char *s) {
+ size_t size = strlen(s) + 1;
+ char *d = onewayalloc_mallocz((OWA_PAGE *)owa, size);
+ memcpy(d, s, size);
+ return d;
+}
+
+void *onewayalloc_memdupz(ONEWAYALLOC *owa, const void *src, size_t size) {
+ void *mem = onewayalloc_mallocz((OWA_PAGE *)owa, size);
+ // memcpy() is way faster than strcpy() since it does not check for '\0'
+ memcpy(mem, src, size);
+ return mem;
+}
+
+void onewayalloc_freez(ONEWAYALLOC *owa __maybe_unused, const void *ptr __maybe_unused) {
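+    // memory on an OWA is only released in bulk by onewayalloc_destroy(),
+    // so in production builds this function is intentionally a no-op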
+#ifdef NETDATA_INTERNAL_CHECKS
+ // allow the caller to call us for a mallocz() allocation
+ // so try to find it in our memory and if it is not there
+ // log an error
+
+ if (unlikely(!ptr))
+ return;
+
+ OWA_PAGE *head = (OWA_PAGE *)owa;
+ OWA_PAGE *page;
+ uintptr_t seeking = (uintptr_t)ptr;
+
+ for(page = head; page ;page = page->next) {
+ uintptr_t start = (uintptr_t)page;
+ uintptr_t end = start + page->size;
+
+ if(seeking >= start && seeking <= end) {
+ // found it - it is ours
+ // just return to let the caller think we actually did something
+ return;
+ }
+ }
+
+ // not found - it is not ours
+ // let's free it with the system allocator
+ error("ONEWAYALLOC: request to free address 0x%p that is not allocated by this OWA", ptr);
+#endif
+
+ return;
+}
+
+void *onewayalloc_doublesize(ONEWAYALLOC *owa, const void *src, size_t oldsize) {
+ size_t newsize = oldsize * 2;
+ void *dst = onewayalloc_mallocz(owa, newsize);
+ memcpy(dst, src, oldsize);
+ onewayalloc_freez(owa, src);
+ return dst;
+}
+
+void onewayalloc_destroy(ONEWAYALLOC *owa) {
+ if(!owa) return;
+
+ OWA_PAGE *head = (OWA_PAGE *)owa;
+
+ //info("OWA: %zu allocations of %zu total bytes, in %zu pages of %zu total bytes",
+ // head->stats_mallocs_made, head->stats_mallocs_size,
+ // head->stats_pages, head->stats_pages_size);
+
+ OWA_PAGE *page = head;
+ while(page) {
+ OWA_PAGE *p = page;
+ page = page->next;
+ // munmap(p, p->size);
+ freez(p);
+ }
+}
diff --git a/libnetdata/onewayalloc/onewayalloc.h b/libnetdata/onewayalloc/onewayalloc.h
new file mode 100644
index 0000000..e536e05
--- /dev/null
+++ b/libnetdata/onewayalloc/onewayalloc.h
@@ -0,0 +1,19 @@
+#ifndef ONEWAYALLOC_H
+#define ONEWAYALLOC_H 1
+
+#include "../libnetdata.h"
+
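+// opaque handle: internally this is the first OWA_PAGE of the arena (see onewayalloc.c)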
+typedef void ONEWAYALLOC;
+
+ONEWAYALLOC *onewayalloc_create(size_t size_hint);
+void onewayalloc_destroy(ONEWAYALLOC *owa);
+
+void *onewayalloc_mallocz(ONEWAYALLOC *owa, size_t size);
+void *onewayalloc_callocz(ONEWAYALLOC *owa, size_t nmemb, size_t size);
+char *onewayalloc_strdupz(ONEWAYALLOC *owa, const char *s);
+void *onewayalloc_memdupz(ONEWAYALLOC *owa, const void *src, size_t size);
+void onewayalloc_freez(ONEWAYALLOC *owa, const void *ptr);
+
+void *onewayalloc_doublesize(ONEWAYALLOC *owa, const void *src, size_t oldsize);
+
+#endif // ONEWAYALLOC_H