From f26f66d866ba1a9f3204e6fdfe2b07e67b5492ad Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 10 Apr 2024 21:41:32 +0200
Subject: Adding upstream version 2.8.

Signed-off-by: Daniel Baumann
---
 util/mem.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)
 create mode 100644 util/mem.c

diff --git a/util/mem.c b/util/mem.c
new file mode 100644
index 0000000..d2be46e
--- /dev/null
+++ b/util/mem.c
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <sys/mman.h>
+
+#include "mem.h"
+
+#include "common.h"
+
+#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
+#define HUGE_MIN 0x80000
+
+void *nvme_alloc(size_t len)
+{
+	void *p;
+
+	len = ROUND_UP(len, 0x1000);
+	if (posix_memalign((void *)&p, getpagesize(), len))
+		return NULL;
+
+	memset(p, 0, len);
+	return p;
+}
+
+void *nvme_realloc(void *p, size_t len)
+{
+	size_t old_len = malloc_usable_size(p);
+
+	void *result = nvme_alloc(len);
+
+	if (result && p) {
+		memcpy(result, p, min(old_len, len));
+		free(p);
+	}
+
+	return result;
+}
+
+void *nvme_alloc_huge(size_t len, struct nvme_mem_huge *mh)
+{
+	memset(mh, 0, sizeof(*mh));
+
+	len = ROUND_UP(len, 0x1000);
+
+	/*
+	 * For smaller allocations we just use posix_memalign and hope the
+	 * kernel is able to convert it to a contiguous memory region.
+	 */
+	if (len < HUGE_MIN) {
+		mh->p = nvme_alloc(len);
+		if (!mh->p)
+			return NULL;
+		mh->posix_memalign = true;
+		mh->len = len;
+		return mh->p;
+	}
+
+	/*
+	 * Larger allocations will almost certainly fail with the small
+	 * allocation approach. Instead try pre-allocating memory from the
+	 * HugeTLB pool.
+	 *
+	 * https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
+	 */
+	mh->p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+		     MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
+	if (mh->p != MAP_FAILED) {
+		mh->len = len;
+		return mh->p;
+	}
+
+	/*
+	 * And if mmap fails because the pool is empty, try to use
+	 * posix_memalign/madvise as a fallback with a 2MB alignment in
+	 * order to fulfill the request. This gives the kernel a chance to
+	 * claim some huge pages. This might still fail though.
+	 */
+	len = ROUND_UP(len, 0x200000);
+	if (posix_memalign(&mh->p, 0x200000, len))
+		return NULL;
+	mh->posix_memalign = true;
+	mh->len = len;
+
+	memset(mh->p, 0, mh->len);
+
+	if (madvise(mh->p, mh->len, MADV_HUGEPAGE) < 0) {
+		nvme_free_huge(mh);
+		return NULL;
+	}
+
+	return mh->p;
+}
+
+void nvme_free_huge(struct nvme_mem_huge *mh)
+{
+	if (!mh || mh->len == 0)
+		return;
+
+	if (mh->posix_memalign)
+		free(mh->p);
+	else
+		munmap(mh->p, mh->len);
+
+	mh->len = 0;
+	mh->p = NULL;
+}
--
cgit v1.2.3
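
The patch above implements a two-tier huge-page strategy: allocations under
HUGE_MIN come from posix_memalign, while larger ones first try the kernel's
pre-reserved HugeTLB pool via mmap(MAP_HUGETLB) and fall back to 2MB-aligned
heap memory hinted with MADV_HUGEPAGE. The standalone sketch below illustrates
that same pattern; it is not part of the patch. The names huge_buf, huge_alloc,
and huge_free are hypothetical, and the struct only mirrors how util/mem.c uses
struct nvme_mem_huge (whose real definition lives in util/mem.h, not shown in
this commit).

#define _GNU_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/* Assumed shape, mirroring how util/mem.c uses struct nvme_mem_huge. */
struct huge_buf {
	void *p;
	size_t len;
	bool posix_memalign;
};

static void huge_free(struct huge_buf *hb)
{
	if (!hb || hb->len == 0)
		return;
	if (hb->posix_memalign)
		free(hb->p);
	else
		munmap(hb->p, hb->len);
	hb->p = NULL;
	hb->len = 0;
}

static void *huge_alloc(struct huge_buf *hb, size_t len)
{
	memset(hb, 0, sizeof(*hb));

	/* First choice: a mapping backed by the pre-reserved HugeTLB pool. */
	hb->p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		     MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
	if (hb->p != MAP_FAILED) {
		hb->len = len;
		return hb->p;
	}

	/* Fallback: 2MB-aligned heap memory plus a transparent-huge-page hint. */
	if (posix_memalign(&hb->p, 0x200000, len))
		return NULL;
	hb->posix_memalign = true;
	hb->len = len;
	memset(hb->p, 0, len);
	madvise(hb->p, len, MADV_HUGEPAGE); /* best effort; may be ignored */
	return hb->p;
}

int main(void)
{
	struct huge_buf hb;
	size_t len = 2 * 1024 * 1024; /* one 2MB huge page on x86-64 */

	if (!huge_alloc(&hb, len)) {
		perror("huge_alloc");
		return 1;
	}
	printf("%zu bytes at %p (%s)\n", hb.len, hb.p,
	       hb.posix_memalign ? "madvise fallback" : "HugeTLB pool");
	huge_free(&hb);
	return 0;
}

Note that mmap(MAP_HUGETLB) fails with ENOMEM unless huge pages have been
reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages), which is why the
madvise fallback path matters in practice.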