summaryrefslogtreecommitdiffstats
path: root/src/basic/alloc-util.c
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-10 20:49:52 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-10 20:49:52 +0000
commit55944e5e40b1be2afc4855d8d2baf4b73d1876b5 (patch)
tree33f869f55a1b149e9b7c2b7e201867ca5dd52992 /src/basic/alloc-util.c
parentInitial commit. (diff)
downloadsystemd-55944e5e40b1be2afc4855d8d2baf4b73d1876b5.tar.xz
systemd-55944e5e40b1be2afc4855d8d2baf4b73d1876b5.zip
Adding upstream version 255.4.upstream/255.4
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/basic/alloc-util.c')
-rw-r--r--src/basic/alloc-util.c135
1 files changed, 135 insertions, 0 deletions
diff --git a/src/basic/alloc-util.c b/src/basic/alloc-util.c
new file mode 100644
index 0000000..fc98610
--- /dev/null
+++ b/src/basic/alloc-util.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <malloc.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "alloc-util.h"
+#include "macro.h"
+#include "memory-util.h"
+
/* Return a heap-allocated copy of the 'l' bytes starting at 'p', or NULL on OOM.
 * For l == 0 a valid minimal (1-byte) allocation is still returned, so callers can
 * distinguish "empty duplicate" from allocation failure. Caller owns the result. */
void* memdup(const void *p, size_t l) {
        void *copy;

        assert(l == 0 || p);

        copy = malloc(l > 0 ? l : 1);
        if (!copy)
                return NULL;

        return memcpy_safe(copy, p, l);
}
+
/* Like memdup(), but appends one extra NUL byte after the copied data. Useful when the
 * duplicated bytes shall later be interpreted as a NUL-terminated string. Returns NULL
 * on OOM, and also when l == SIZE_MAX (since l + 1 would overflow). */
void* memdup_suffix0(const void *p, size_t l) {
        uint8_t *copy;

        assert(l == 0 || p);

        if (l == SIZE_MAX) /* l + 1 would overflow */
                return NULL;

        copy = malloc(l + 1);
        if (!copy)
                return NULL;

        copy[l] = 0; /* safety NUL terminator after the payload */
        return memcpy_safe(copy, p, l);
}
+
/* Grow the buffer at *p so that it can hold at least 'need' elements of 'size' bytes each.
 * Grows geometrically (2x the requested count, minimum 64 bytes) to amortize repeated
 * appends. On success updates *p and returns it; on OOM or arithmetic overflow returns
 * NULL and leaves *p untouched. */
void* greedy_realloc(
                void **p,
                size_t need,
                size_t size) {

        size_t new_count, bytes;
        void *grown;

        assert(p);

        /* The currently allocated size is determined via malloc_usable_size(). On all systems
         * we care about this is safe to rely on. Should we ever need to avoid it, we could
         * instead fall back to a plain realloc() per call, rounded up to the next power of
         * two or so. */

        if (*p) {
                /* Fast path: the existing allocation is already large enough. */
                if (size == 0 || MALLOC_SIZEOF_SAFE(*p) / size >= need)
                        return *p;
        }

        if (_unlikely_(need > SIZE_MAX/2)) /* doubling below would overflow */
                return NULL;
        new_count = need * 2;

        if (size_multiply_overflow(new_count, size))
                return NULL;
        bytes = new_count * size;

        /* Never allocate fewer than 64 bytes */
        if (bytes < 64)
                bytes = 64;

        grown = realloc(*p, bytes);
        if (!grown)
                return NULL;

        return *p = grown;
}
+
/* Like greedy_realloc(), but additionally guarantees that any newly acquired tail of the
 * allocation is zero-filled. Returns the (possibly moved) buffer, or NULL on failure. */
void* greedy_realloc0(
                void **p,
                size_t need,
                size_t size) {

        size_t old_usable, new_usable;
        uint8_t *buf;

        assert(p);

        old_usable = MALLOC_SIZEOF_SAFE(*p); /* yields 0 for NULL input, as per docs */

        buf = greedy_realloc(p, need, size);
        if (!buf)
                return NULL;

        new_usable = MALLOC_SIZEOF_SAFE(buf);

        /* Round the old usable size down to a whole multiple of the element size: the
         * partial element beyond it was never valid data and must be cleared too. */
        if (size == 0) /* avoid division by zero */
                old_usable = 0;
        else
                old_usable = old_usable / size * size;

        if (new_usable > old_usable)
                memzero(buf + old_usable, new_usable - old_usable);

        return buf;
}
+
/* Append 'n_from' elements of 'size' bytes each (read from 'from') to the dynamically
 * sized array at *p, which currently holds *n_p elements. On success updates *p and
 * *n_p and returns the buffer; on overflow or OOM returns NULL with both untouched. */
void* greedy_realloc_append(
                void **p,
                size_t *n_p,
                const void *from,
                size_t n_from,
                size_t size) {

        uint8_t *dst;

        assert(p);
        assert(n_p);
        assert(from || n_from == 0);

        if (*n_p > SIZE_MAX - n_from) /* would the combined element count overflow? */
                return NULL;

        dst = greedy_realloc(p, *n_p + n_from, size);
        if (!dst)
                return NULL;

        memcpy_safe(dst + *n_p * size, from, n_from * size);
        *n_p += n_from;

        return dst;
}
+
/* Identity function by design; 'newsize' is intentionally unused.
 * NOTE(review): presumably this "launders" the pointer so the compiler forgets the
 * originally requested allocation size, letting callers use the extra bytes reported by
 * malloc_usable_size() without tripping compile-time object-size analysis — confirm
 * against the declaration/attributes in alloc-util.h. */
void *expand_to_usable(void *ptr, size_t newsize _unused_) {
        return ptr;
}