Diffstat (limited to 'libnetdata/onewayalloc/onewayalloc.c')
-rw-r--r--  libnetdata/onewayalloc/onewayalloc.c  13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/libnetdata/onewayalloc/onewayalloc.c b/libnetdata/onewayalloc/onewayalloc.c
index 59c3b685..2f007b18 100644
--- a/libnetdata/onewayalloc/onewayalloc.c
+++ b/libnetdata/onewayalloc/onewayalloc.c
@@ -14,6 +14,12 @@ typedef struct owa_page {
     struct owa_page *last;      // the last page on the list - we currently allocate on this
 } OWA_PAGE;
 
+static size_t onewayalloc_total_memory = 0;
+
+size_t onewayalloc_allocated_memory(void) {
+    return __atomic_load_n(&onewayalloc_total_memory, __ATOMIC_RELAXED);
+}
+
 // allocations need to be aligned to CPU register width
 // https://en.wikipedia.org/wiki/Data_structure_alignment
 static inline size_t natural_alignment(size_t size) {
@@ -60,6 +66,7 @@ static OWA_PAGE *onewayalloc_create_internal(OWA_PAGE *head, size_t size_hint) {
     // OWA_PAGE *page = (OWA_PAGE *)netdata_mmap(NULL, size, MAP_ANONYMOUS|MAP_PRIVATE, 0);
     // if(unlikely(!page)) fatal("Cannot allocate onewayalloc buffer of size %zu", size);
     OWA_PAGE *page = (OWA_PAGE *)mallocz(size);
+    __atomic_add_fetch(&onewayalloc_total_memory, size, __ATOMIC_RELAXED);
 
     page->size = size;
     page->offset = natural_alignment(sizeof(OWA_PAGE));
@@ -183,11 +190,17 @@ void onewayalloc_destroy(ONEWAYALLOC *owa) {
     //       head->stats_mallocs_made, head->stats_mallocs_size,
     //       head->stats_pages, head->stats_pages_size);
 
+    size_t total_size = 0;
     OWA_PAGE *page = head;
     while(page) {
+        total_size += page->size;
+
         OWA_PAGE *p = page;
         page = page->next;
+
         // munmap(p, p->size);
         freez(p);
     }
+
+    __atomic_sub_fetch(&onewayalloc_total_memory, total_size, __ATOMIC_RELAXED);
 }
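
Taken together, the three hunks maintain a process-wide byte counter for the one-way allocator: each page allocation adds its full page size with a relaxed atomic, and destroying an arena subtracts the combined size of all its pages, so onewayalloc_allocated_memory() reports the memory currently held by all live arenas. Relaxed ordering is sufficient here because the value is a statistics counter, not a synchronization point. Below is a minimal usage sketch, assuming the usual onewayalloc API declared in libnetdata (onewayalloc_create(), onewayalloc_mallocz(), onewayalloc_destroy()); the size hint, buffer size, and helper name are illustrative only, not part of this commit.

    // report_owa_memory_example() is a hypothetical helper written for
    // illustration; it exercises the accessor added by this commit.
    #include <stdio.h>
    #include "libnetdata/libnetdata.h"   // assumed umbrella header

    void report_owa_memory_example(void) {
        size_t before = onewayalloc_allocated_memory();

        // Create an arena. The hint is rounded up internally to whole
        // pages, so the counter grows by the page size actually
        // malloc'ed, not by the bytes handed out to callers.
        ONEWAYALLOC *owa = onewayalloc_create(4096);
        char *buffer = onewayalloc_mallocz(owa, 100);
        snprintf(buffer, 100, "hello");

        fprintf(stderr, "owa memory in use: %zu bytes\n",
                onewayalloc_allocated_memory() - before);

        // Destroying the arena frees every page in one pass and
        // subtracts their total size from the global counter.
        onewayalloc_destroy(owa);

        // Back to the starting value, modulo concurrent allocations
        // made by other threads in the meantime.
        fprintf(stderr, "owa memory in use: %zu bytes\n",
                onewayalloc_allocated_memory() - before);
    }

Note that the counter tracks page-level usage: a 100-byte onewayalloc_mallocz() against a fresh arena shows up as one whole page (at least the size hint), which is exactly what an operator wants when asking how much memory the allocator is holding from the system.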