Diffstat (limited to 'contrib/ucw')
l---------   contrib/ucw/LICENSE       |    1
-rw-r--r--   contrib/ucw/alloc.h       |   38
-rw-r--r--   contrib/ucw/config.h      |   58
-rw-r--r--   contrib/ucw/lib.h         |  125
-rw-r--r--   contrib/ucw/libucw.spdx   |   10
-rw-r--r--   contrib/ucw/mempool-fmt.c |   99
-rw-r--r--   contrib/ucw/mempool.c     |  601
-rw-r--r--   contrib/ucw/mempool.h     |  572
8 files changed, 1504 insertions, 0 deletions
diff --git a/contrib/ucw/LICENSE b/contrib/ucw/LICENSE
new file mode 120000
index 0000000..0cb7f47
--- /dev/null
+++ b/contrib/ucw/LICENSE
@@ -0,0 +1 @@
+../licenses/LGPL2
\ No newline at end of file diff --git a/contrib/ucw/alloc.h b/contrib/ucw/alloc.h new file mode 100644 index 0000000..4b5440b --- /dev/null +++ b/contrib/ucw/alloc.h @@ -0,0 +1,38 @@ +/* + * UCW Library -- Generic allocators + * + * (c) 2014 Martin Mares <mj@ucw.cz> + * SPDX-License-Identifier: LGPL-2.1-or-later + * Source: https://www.ucw.cz/libucw/ + */ + +#ifndef _UCW_ALLOC_H +#define _UCW_ALLOC_H + +/** + * This structure describes a generic allocator. It provides pointers + * to three functions, which handle the actual (re)allocations. + **/ +struct ucw_allocator { + void * (*alloc)(struct ucw_allocator *alloc, size_t size); + void * (*realloc)(struct ucw_allocator *alloc, void *ptr, size_t old_size, size_t new_size); + void (*free)(struct ucw_allocator *alloc, void *ptr); +}; + +/* alloc-std.c */ + +/** + * [[std]] + * This allocator uses <<basics:xmalloc()>>, <<basics:xrealloc()>> and <<basics:xfree()>>. The memory + * it allocates is left unitialized. + **/ +extern struct ucw_allocator ucw_allocator_std; + +/** + * [[zeroing]] + * This allocator uses <<basics:xmalloc()>>, <<basics:xrealloc()>> and <<basics:xfree()>>. All memory + * is zeroed upon allocation. + **/ +extern struct ucw_allocator ucw_allocator_zeroed; + +#endif diff --git a/contrib/ucw/config.h b/contrib/ucw/config.h new file mode 100644 index 0000000..3c94104 --- /dev/null +++ b/contrib/ucw/config.h @@ -0,0 +1,58 @@ +/* + * UCW Library -- Configuration-Dependent Definitions + * + * (c) 1997--2012 Martin Mares <mj@ucw.cz> + * (c) 2006 Robert Spalek <robert@ucw.cz> + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * Source: https://www.ucw.cz/libucw/ + */ + +#ifndef _UCW_CONFIG_H +#define _UCW_CONFIG_H + +/* Default page size and pointer alignment */ +#ifndef CPU_PAGE_SIZE +#define CPU_PAGE_SIZE 4096 +#endif +#define CPU_STRUCT_ALIGN sizeof(void *) + +/* Tell libc we're going to use all extensions available */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +/* Types (based on standard C99 integers) */ + +#include <stddef.h> +#include <stdint.h> + +typedef uint8_t byte; /** Exactly 8 bits, unsigned **/ +typedef uint8_t u8; /** Exactly 8 bits, unsigned **/ +typedef int8_t s8; /** Exactly 8 bits, signed **/ +typedef uint16_t u16; /** Exactly 16 bits, unsigned **/ +typedef int16_t s16; /** Exactly 16 bits, signed **/ +typedef uint32_t u32; /** Exactly 32 bits, unsigned **/ +typedef int32_t s32; /** Exactly 32 bits, signed **/ +typedef uint64_t u64; /** Exactly 64 bits, unsigned **/ +typedef int64_t s64; /** Exactly 64 bits, signed **/ + + +#ifndef uint /* Redefining typedef is a C11 feature. */ +typedef unsigned int uint; /** A better pronounceable alias for `unsigned int` **/ +#define uint uint +#endif + +typedef s64 timestamp_t; /** Milliseconds since an unknown epoch **/ + +// FIXME: This should be removed soon +typedef uint uns; /** Backwards compatible alias for `uint' ***/ + +#ifdef CONFIG_UCW_LARGE_FILES +typedef s64 ucw_off_t; /** File position (either 32- or 64-bit, depending on `CONFIG_UCW_LARGE_FILES`). 
**/ +#else +typedef s32 ucw_off_t; +#endif + +#endif diff --git a/contrib/ucw/lib.h b/contrib/ucw/lib.h new file mode 100644 index 0000000..89b3a20 --- /dev/null +++ b/contrib/ucw/lib.h @@ -0,0 +1,125 @@ +/* + * The UCW Library -- Miscellaneous Functions + * + * (c) 1997--2014 Martin Mares <mj@ucw.cz> + * (c) 2005--2014 Tomas Valla <tom@ucw.cz> + * (c) 2006 Robert Spalek <robert@ucw.cz> + * (c) 2007 Pavel Charvat <pchar@ucw.cz> + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * Source: https://www.ucw.cz/libucw/ + */ + +#ifndef _UCW_LIB_H +#define _UCW_LIB_H + +#include <stdarg.h> +#include <stdbool.h> +#include <stdlib.h> + +#ifdef CONFIG_UCW_CLEAN_ABI +#define assert_failed ucw_assert_failed +#define assert_failed_msg ucw_assert_failed_msg +#define assert_failed_noinfo ucw_assert_failed_noinfo +#define big_alloc ucw_big_alloc +#define big_alloc_zero ucw_big_alloc_zero +#define big_free ucw_big_free +#define die ucw_die +#define log_die_hook ucw_log_die_hook +#define log_file ucw_log_file +#define log_fork ucw_log_fork +#define log_init ucw_log_init +#define log_pid ucw_log_pid +#define log_title ucw_log_title +#define msg ucw_msg +#define page_alloc ucw_page_alloc +#define page_alloc_zero ucw_page_alloc_zero +#define page_free ucw_page_free +#define page_realloc ucw_page_realloc +#define random_max ucw_random_max +#define random_max_u64 ucw_random_max_u64 +#define random_u32 ucw_random_u32 +#define random_u64 ucw_random_u64 +#define vdie ucw_vdie +#define vmsg ucw_vmsg +#define xfree ucw_xfree +#define xmalloc ucw_xmalloc +#define xmalloc_zero ucw_xmalloc_zero +#define xrealloc ucw_xrealloc +#define xstrdup ucw_xstrdup +#endif + +/*** === Macros for handling structures, offsets and alignment ***/ + +#define CHECK_PTR_TYPE(x, type) ((x)-(type)(x) + (type)(x)) /** Check that a pointer @x is of type @type. Fail compilation if not. **/ +#define PTR_TO(s, i) &((s*)0)->i /** Return OFFSETOF() in form of a pointer. **/ +#define OFFSETOF(s, i) ((uint)offsetof(s, i)) /** Offset of item @i from the start of structure @s **/ +#define SKIP_BACK(s, i, p) ((s *)((char *)p - OFFSETOF(s, i))) /** Given a pointer @p to item @i of structure @s, return a pointer to the start of the struct. **/ + +/** Align an integer @s to the nearest higher multiple of @a (which should be a power of two) **/ +#define ALIGN_TO(s, a) (((s)+a-1)&~(a-1)) + +/** Align a pointer @p to the nearest higher multiple of @s. **/ +#define ALIGN_PTR(p, s) ((uintptr_t)(p) % (s) ? (typeof(p))((uintptr_t)(p) + (s) - (uintptr_t)(p) % (s)) : (p)) + +#define UNALIGNED_PART(ptr, type) (((uintptr_t) (ptr)) % sizeof(type)) + +/*** === Other utility macros ***/ + +#define MIN(a,b) (((a)<(b))?(a):(b)) /** Minimum of two numbers **/ +#define MAX(a,b) (((a)>(b))?(a):(b)) /** Maximum of two numbers **/ +#define CLAMP(x,min,max) ({ typeof(x) _t=x; (_t < min) ? min : (_t > max) ? max : _t; }) /** Clip a number @x to interval [@min,@max] **/ +#define ABS(x) ((x) < 0 ? 
-(x) : (x)) /** Absolute value **/ +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(*(a))) /** The number of elements of an array **/ +#define STRINGIFY(x) #x /** Convert macro parameter to a string **/ +#define STRINGIFY_EXPANDED(x) STRINGIFY(x) /** Convert an expanded macro parameter to a string **/ +#define GLUE(x,y) x##y /** Glue two tokens together **/ +#define GLUE_(x,y) x##_##y /** Glue two tokens together, separating them by an underscore **/ + +#define COMPARE(x,y) do { if ((x)<(y)) return -1; if ((x)>(y)) return 1; } while(0) /** Numeric comparison function for qsort() **/ +#define REV_COMPARE(x,y) COMPARE(y,x) /** Reverse numeric comparison **/ +#define COMPARE_LT(x,y) do { if ((x)<(y)) return 1; if ((x)>(y)) return 0; } while(0) +#define COMPARE_GT(x,y) COMPARE_LT(y,x) + +#define ROL(x, bits) (((x) << (bits)) | ((uint)(x) >> (sizeof(uint)*8 - (bits)))) /** Bitwise rotation of an unsigned int to the left **/ +#define ROR(x, bits) (((uint)(x) >> (bits)) | ((x) << (sizeof(uint)*8 - (bits)))) /** Bitwise rotation of an unsigned int to the right **/ + +/*** === Shortcuts for GCC Extensions ***/ + +#ifdef __GNUC__ + +#include "ccan/compiler/compiler.h" +#define FORMAT_CHECK(x,y,z) __attribute__((format(x,y,z))) /** Checking of printf-like format strings **/ +#define likely(x) __builtin_expect((x),1) /** Use `if (likely(@x))` if @x is almost always true **/ +#define unlikely(x) __builtin_expect((x),0) /** Use `if (unlikely(@x))` to hint that @x is almost always false **/ + +#if __GNUC__ >= 4 || __GNUC__ == 3 && __GNUC_MINOR__ >= 3 +#define ALWAYS_INLINE inline __attribute__((always_inline)) /** Forcibly inline **/ +#define NO_INLINE __attribute__((noinline)) /** Forcibly uninline **/ +#else +#define ALWAYS_INLINE inline +#endif + +#if __GNUC__ >= 4 +#define LIKE_MALLOC __attribute__((malloc)) /** Function returns a "new" pointer **/ +#define SENTINEL_CHECK __attribute__((sentinel)) /** The last argument must be NULL **/ +#else +#define LIKE_MALLOC +#define SENTINEL_CHECK +#endif + +#else +#error This program requires the GNU C compiler. +#endif + +/*** + * [[logging]] + * + * === Basic logging functions (see <<log:,Logging>> and <ucw/log.h> for more) + ***/ + +#define DBG(x, ...) do { } while(0) +#define DBG_SPOT do { } while(0) +#define ASSERT(x) + +#endif diff --git a/contrib/ucw/libucw.spdx b/contrib/ucw/libucw.spdx new file mode 100644 index 0000000..e18b2ea --- /dev/null +++ b/contrib/ucw/libucw.spdx @@ -0,0 +1,10 @@ +SPDXVersion: SPDX-2.1 +DataLicense: CC0-1.0 +SPDXID: SPDXRef-DOCUMENT +DocumentName: libucw +DocumentNamespace: http://spdx.org/spdxdocs/spdx-v2.1-c3d39e26-6b71-46d4-88ea-e52750932ff3 + +PackageName: libucw +PackageDownloadLocation: git://git.ucw.cz/libucw.git@f1bde7104b04d5254d1d1d7dcc8de790a43a416f#ucw/ +PackageOriginator: Organization: United Computer Wizards +PackageLicenseDeclared: LGPL-2.1-or-later diff --git a/contrib/ucw/mempool-fmt.c b/contrib/ucw/mempool-fmt.c new file mode 100644 index 0000000..22f3a50 --- /dev/null +++ b/contrib/ucw/mempool-fmt.c @@ -0,0 +1,99 @@ +/* + * UCW Library -- Memory Pools (Formatting) + * + * (c) 2005 Martin Mares <mj@ucw.cz> + * (c) 2007 Pavel Charvat <pchar@ucw.cz> + * SPDX-License-Identifier: LGPL-2.1-or-later + * Source: https://www.ucw.cz/libucw/ + */ + +#include <ucw/lib.h> +#include <ucw/mempool.h> + +#include <stdio.h> +#include <string.h> + +/* FIXME: migrate to Knot DNS version of mempools. 
*/ +#pragma GCC diagnostic ignored "-Wpointer-arith" + +static char * +mp_vprintf_at(struct mempool *mp, size_t ofs, const char *fmt, va_list args) +{ + char *ret = mp_grow(mp, ofs + 1) + ofs; + va_list args2; + va_copy(args2, args); + int cnt = vsnprintf(ret, mp_avail(mp) - ofs, fmt, args2); + va_end(args2); + if (cnt < 0) + { + /* Our C library doesn't support C99 return value of vsnprintf, so we need to iterate */ + do + { + ret = mp_expand(mp) + ofs; + va_copy(args2, args); + cnt = vsnprintf(ret, mp_avail(mp) - ofs, fmt, args2); + va_end(args2); + } + while (cnt < 0); + } + else if ((uint)cnt >= mp_avail(mp) - ofs) + { + ret = mp_grow(mp, ofs + cnt + 1) + ofs; + va_copy(args2, args); + vsnprintf(ret, cnt + 1, fmt, args2); + va_end(args2); + } + mp_end(mp, ret + cnt + 1); + return ret - ofs; +} + +char * +mp_vprintf(struct mempool *mp, const char *fmt, va_list args) +{ + mp_start(mp, 1); + return mp_vprintf_at(mp, 0, fmt, args); +} + +char * +mp_printf(struct mempool *p, const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + char *res = mp_vprintf(p, fmt, args); + va_end(args); + return res; +} + +char * +mp_vprintf_append(struct mempool *mp, char *ptr, const char *fmt, va_list args) +{ + size_t ofs = mp_open(mp, ptr); + ASSERT(ofs && !ptr[ofs - 1]); + return mp_vprintf_at(mp, ofs - 1, fmt, args); +} + +char * +mp_printf_append(struct mempool *mp, char *ptr, const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + char *res = mp_vprintf_append(mp, ptr, fmt, args); + va_end(args); + return res; +} + +#ifdef TEST + +int main(void) +{ + struct mempool *mp = mp_new(64); + char *x = mp_printf(mp, "<Hello, %s!>", "World"); + fputs(x, stdout); + x = mp_printf_append(mp, x, "<Appended>"); + fputs(x, stdout); + x = mp_printf(mp, "<Hello, %50s!>\n", "World"); + fputs(x, stdout); + return 0; +} + +#endif diff --git a/contrib/ucw/mempool.c b/contrib/ucw/mempool.c new file mode 100644 index 0000000..314b58e --- /dev/null +++ b/contrib/ucw/mempool.c @@ -0,0 +1,601 @@ +/* + * UCW Library -- Memory Pools (One-Time Allocation) + * + * (c) 1997--2014 Martin Mares <mj@ucw.cz> + * (c) 2007--2015 Pavel Charvat <pchar@ucw.cz> + * + * SPDX-License-Identifier: LGPL-2.1-or-later + * Source: https://www.ucw.cz/libucw/ + */ + +#undef LOCAL_DEBUG + +#include <ucw/config.h> +#include <ucw/lib.h> +#include <ucw/alloc.h> +#include <ucw/mempool.h> + +#include <string.h> +#include <stdlib.h> + +/* FIXME: migrate to Knot DNS version of mempools. 
*/ +#pragma GCC diagnostic ignored "-Wpointer-arith" + +#define MP_CHUNK_TAIL ALIGN_TO(sizeof(struct mempool_chunk), CPU_STRUCT_ALIGN) +#define MP_SIZE_MAX (SIZE_MAX - MP_CHUNK_TAIL - CPU_PAGE_SIZE) + +struct mempool_chunk { +#ifdef CONFIG_DEBUG + struct mempool *pool; // Can be useful when analysing coredump for memory leaks +#endif + struct mempool_chunk *next; + size_t size; +}; + +static size_t +mp_align_size(size_t size) +{ +#ifdef CONFIG_UCW_POOL_IS_MMAP + size = MAX(size, 64 + MP_CHUNK_TAIL); + return ALIGN_TO(size, CPU_PAGE_SIZE) - MP_CHUNK_TAIL; +#else + return ALIGN_TO(size, CPU_STRUCT_ALIGN); +#endif +} + +static void *mp_allocator_alloc(struct ucw_allocator *a, size_t size) +{ + struct mempool *mp = (struct mempool *) a; + return mp_alloc_fast(mp, size); +} + +static void *mp_allocator_realloc(struct ucw_allocator *a, void *ptr, size_t old_size, size_t new_size) +{ + if (new_size <= old_size) + return ptr; + + /* + * In the future, we might want to do something like mp_realloc(), + * but we have to check that it is indeed the last block in the pool. + */ + struct mempool *mp = (struct mempool *) a; + void *new = mp_alloc_fast(mp, new_size); + memcpy(new, ptr, old_size); + return new; +} + +static void mp_allocator_free(struct ucw_allocator *a UNUSED, void *ptr UNUSED) +{ + // Does nothing +} + +void +mp_init(struct mempool *pool, size_t chunk_size) +{ + chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size)); + *pool = (struct mempool) { + .allocator = { + .alloc = mp_allocator_alloc, + .realloc = mp_allocator_realloc, + .free = mp_allocator_free, + }, + .chunk_size = chunk_size, + .threshold = chunk_size >> 1, + .last_big = &pool->last_big + }; +} + +static void * +mp_new_big_chunk(struct mempool *pool, size_t size) +{ + struct mempool_chunk *chunk; + chunk = malloc(size + MP_CHUNK_TAIL); + if (!chunk) + return NULL; + chunk = (struct mempool_chunk *)((char *)chunk + size); + chunk->size = size; + if (pool) + pool->total_size += size + MP_CHUNK_TAIL; + return chunk; +} + +static void +mp_free_big_chunk(struct mempool *pool, struct mempool_chunk *chunk) +{ + pool->total_size -= chunk->size + MP_CHUNK_TAIL; + free((void *)chunk - chunk->size); +} + +static void * +mp_new_chunk(struct mempool *pool, size_t size) +{ +#ifdef CONFIG_UCW_POOL_IS_MMAP + struct mempool_chunk *chunk; + chunk = page_alloc(size + MP_CHUNK_TAIL) + size; + chunk->size = size; + if (pool) + pool->total_size += size + MP_CHUNK_TAIL; + return chunk; +#else + return mp_new_big_chunk(pool, size); +#endif +} + +static void +mp_free_chunk(struct mempool *pool, struct mempool_chunk *chunk) +{ +#ifdef CONFIG_UCW_POOL_IS_MMAP + pool->total_size -= chunk->size + MP_CHUNK_TAIL; + page_free((void *)chunk - chunk->size, chunk->size + MP_CHUNK_TAIL); +#else + mp_free_big_chunk(pool, chunk); +#endif +} + +struct mempool * +mp_new(size_t chunk_size) +{ + chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size)); + struct mempool_chunk *chunk = mp_new_chunk(NULL, chunk_size); + struct mempool *pool = (void *)chunk - chunk_size; + DBG("Creating mempool %p with %u bytes long chunks", pool, chunk_size); + chunk->next = NULL; +#ifdef CONFIG_DEBUG + chunk->pool = pool; +#endif + *pool = (struct mempool) { + .allocator = { + .alloc = mp_allocator_alloc, + .realloc = mp_allocator_realloc, + .free = mp_allocator_free, + }, + .state = { .free = { chunk_size - sizeof(*pool) }, .last = { chunk } }, + .chunk_size = chunk_size, + .threshold = chunk_size >> 1, + .last_big = &pool->last_big, + .total_size = 
chunk->size + MP_CHUNK_TAIL, + }; + return pool; +} + +static void +mp_free_chain(struct mempool *pool, struct mempool_chunk *chunk) +{ + while (chunk) + { + struct mempool_chunk *next = chunk->next; + mp_free_chunk(pool, chunk); + chunk = next; + } +} + +static void +mp_free_big_chain(struct mempool *pool, struct mempool_chunk *chunk) +{ + while (chunk) + { + struct mempool_chunk *next = chunk->next; + mp_free_big_chunk(pool, chunk); + chunk = next; + } +} + +void +mp_delete(struct mempool *pool) +{ + DBG("Deleting mempool %p", pool); + mp_free_big_chain(pool, pool->state.last[1]); + mp_free_chain(pool, pool->unused); + mp_free_chain(pool, pool->state.last[0]); // can contain the mempool structure +} + +void +mp_flush(struct mempool *pool) +{ + mp_free_big_chain(pool, pool->state.last[1]); + struct mempool_chunk *chunk, *next; + for (chunk = pool->state.last[0]; chunk && (void *)chunk - chunk->size != pool; chunk = next) + { + next = chunk->next; + chunk->next = pool->unused; + pool->unused = chunk; + } + pool->state.last[0] = chunk; + pool->state.free[0] = chunk ? chunk->size - sizeof(*pool) : 0; + pool->state.last[1] = NULL; + pool->state.free[1] = 0; + pool->state.next = NULL; + pool->last_big = &pool->last_big; +} + +static void +mp_stats_chain(struct mempool *pool, struct mempool_chunk *chunk, struct mempool_stats *stats, uint idx) +{ + while (chunk) + { + stats->chain_size[idx] += chunk->size + MP_CHUNK_TAIL; + stats->chain_count[idx]++; + if (idx < 2) + { + stats->used_size += chunk->size; + if ((byte *)pool == (byte *)chunk - chunk->size) + stats->used_size -= sizeof(*pool); + } + chunk = chunk->next; + } + stats->total_size += stats->chain_size[idx]; +} + +void +mp_stats(struct mempool *pool, struct mempool_stats *stats) +{ + bzero(stats, sizeof(*stats)); + mp_stats_chain(pool, pool->state.last[0], stats, 0); + mp_stats_chain(pool, pool->state.last[1], stats, 1); + mp_stats_chain(pool, pool->unused, stats, 2); + stats->used_size -= pool->state.free[0] + pool->state.free[1]; + ASSERT(stats->total_size == pool->total_size); + ASSERT(stats->used_size <= stats->total_size); +} + +u64 +mp_total_size(struct mempool *pool) +{ + return pool->total_size; +} + +void +mp_shrink(struct mempool *pool, u64 min_total_size) +{ + while (1) + { + struct mempool_chunk *chunk = pool->unused; + if (!chunk || pool->total_size - (chunk->size + MP_CHUNK_TAIL) < min_total_size) + break; + pool->unused = chunk->next; + mp_free_chunk(pool, chunk); + } +} + +void * +mp_alloc_internal(struct mempool *pool, size_t size) +{ + struct mempool_chunk *chunk; + if (size <= pool->threshold) + { + pool->idx = 0; + if (pool->unused) + { + chunk = pool->unused; + pool->unused = chunk->next; + } + else + { + chunk = mp_new_chunk(pool, pool->chunk_size); +#ifdef CONFIG_DEBUG + chunk->pool = pool; +#endif + } + chunk->next = pool->state.last[0]; + pool->state.last[0] = chunk; + pool->state.free[0] = pool->chunk_size - size; + return (void *)chunk - pool->chunk_size; + } + else if (likely(size <= MP_SIZE_MAX)) + { + pool->idx = 1; + size_t aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN); + chunk = mp_new_big_chunk(pool, aligned); + chunk->next = pool->state.last[1]; +#ifdef CONFIG_DEBUG + chunk->pool = pool; +#endif + pool->state.last[1] = chunk; + pool->state.free[1] = aligned - size; + return pool->last_big = (void *)chunk - aligned; + } + else + return NULL; +} + +void * +mp_alloc(struct mempool *pool, size_t size) +{ + return mp_alloc_fast(pool, size); +} + +void * +mp_alloc_noalign(struct mempool *pool, size_t size) +{ + 
return mp_alloc_fast_noalign(pool, size); +} + +void * +mp_alloc_zero(struct mempool *pool, size_t size) +{ + void *ptr = mp_alloc_fast(pool, size); + bzero(ptr, size); + return ptr; +} + +void * +mp_start_internal(struct mempool *pool, size_t size) +{ + void *ptr = mp_alloc_internal(pool, size); + if (!ptr) + return NULL; + pool->state.free[pool->idx] += size; + return ptr; +} + +void * +mp_start(struct mempool *pool, size_t size) +{ + return mp_start_fast(pool, size); +} + +void * +mp_start_noalign(struct mempool *pool, size_t size) +{ + return mp_start_fast_noalign(pool, size); +} + +void * +mp_grow_internal(struct mempool *pool, size_t size) +{ + if (unlikely(size > MP_SIZE_MAX)) + return NULL; + size_t avail = mp_avail(pool); + void *ptr = mp_ptr(pool); + if (pool->idx) + { + size_t amortized = likely(avail <= MP_SIZE_MAX / 2) ? avail * 2 : MP_SIZE_MAX; + amortized = MAX(amortized, size); + amortized = ALIGN_TO(amortized, CPU_STRUCT_ALIGN); + struct mempool_chunk *chunk = pool->state.last[1], *next = chunk->next; + pool->total_size = pool->total_size - chunk->size + amortized; + void *nptr = realloc(ptr, amortized + MP_CHUNK_TAIL); + if (!nptr) + return NULL; + ptr = nptr; + chunk = ptr + amortized; + chunk->next = next; + chunk->size = amortized; + pool->state.last[1] = chunk; + pool->state.free[1] = amortized; + pool->last_big = ptr; + return ptr; + } + else + { + void *p = mp_start_internal(pool, size); + memcpy(p, ptr, avail); + return p; + } +} + +size_t +mp_open(struct mempool *pool, void *ptr) +{ + return mp_open_fast(pool, ptr); +} + +void * +mp_realloc(struct mempool *pool, void *ptr, size_t size) +{ + return mp_realloc_fast(pool, ptr, size); +} + +void * +mp_realloc_zero(struct mempool *pool, void *ptr, size_t size) +{ + size_t old_size = mp_open_fast(pool, ptr); + ptr = mp_grow(pool, size); + if (size > old_size) + bzero(ptr + old_size, size - old_size); + mp_end(pool, ptr + size); + return ptr; +} + +void * +mp_spread_internal(struct mempool *pool, void *p, size_t size) +{ + void *old = mp_ptr(pool); + void *new = mp_grow_internal(pool, p-old+size); + if (!new) { + return NULL; + } + return p-old+new; +} + +void +mp_restore(struct mempool *pool, struct mempool_state *state) +{ + struct mempool_chunk *chunk, *next; + struct mempool_state s = *state; + for (chunk = pool->state.last[0]; chunk != s.last[0]; chunk = next) + { + next = chunk->next; + chunk->next = pool->unused; + pool->unused = chunk; + } + for (chunk = pool->state.last[1]; chunk != s.last[1]; chunk = next) + { + next = chunk->next; + mp_free_big_chunk(pool, chunk); + } + pool->state = s; + pool->last_big = &pool->last_big; +} + +struct mempool_state * +mp_push(struct mempool *pool) +{ + struct mempool_state state = pool->state; + struct mempool_state *p = mp_alloc_fast(pool, sizeof(*p)); + *p = state; + pool->state.next = p; + return p; +} + +void +mp_pop(struct mempool *pool) +{ + ASSERT(pool->state.next); + mp_restore(pool, pool->state.next); +} + +#ifdef TEST + +#include <ucw/getopt.h> +#include <stdio.h> +#include <stdlib.h> +#include <time.h> + +static void +fill(byte *ptr, uint len, uint magic) +{ + while (len--) + *ptr++ = (magic++ & 255); +} + +static void +check(byte *ptr, uint len, uint magic, uint align) +{ + ASSERT(!((uintptr_t)ptr & (align - 1))); + while (len--) + if (*ptr++ != (magic++ & 255)) + ASSERT(0); +} + +int main(int argc, char **argv) +{ + srand(time(NULL)); + log_init(argv[0]); + cf_def_file = NULL; + if (cf_getopt(argc, argv, CF_SHORT_OPTS, CF_NO_LONG_OPTS, NULL) >= 0 || argc != 
optind) + die("Invalid usage"); + + uint max = 1000, n = 0, m = 0, can_realloc = 0; + void *ptr[max]; + struct mempool_state *state[max]; + uint len[max], num[max], align[max]; + struct mempool *mp = mp_new(128), mp_static; + + for (uint i = 0; i < 5000; i++) + { + for (uint j = 0; j < n; j++) + check(ptr[j], len[j], j, align[j]); +#if 0 + DBG("free_small=%u free_big=%u idx=%u chunk_size=%u last_big=%p", mp->state.free[0], mp->state.free[1], mp->idx, mp->chunk_size, mp->last_big); + for (struct mempool_chunk *ch = mp->state.last[0]; ch; ch = ch->next) + DBG("small %p %p %p %d", (byte *)ch - ch->size, ch, ch + 1, ch->size); + for (struct mempool_chunk *ch = mp->state.last[1]; ch; ch = ch->next) + DBG("big %p %p %p %d", (byte *)ch - ch->size, ch, ch + 1, ch->size); +#endif + int r = random_max(100); + if ((r -= 1) < 0) + { + DBG("flush"); + mp_flush(mp); + n = m = 0; + } + else if ((r -= 1) < 0) + { + DBG("delete & new"); + mp_delete(mp); + if (random_max(2)) + mp = mp_new(random_max(0x1000) + 1); + else + mp = &mp_static, mp_init(mp, random_max(512) + 1); + n = m = 0; + } + else if (n < max && (r -= 30) < 0) + { + len[n] = random_max(0x2000); + DBG("alloc(%u)", len[n]); + align[n] = random_max(2) ? CPU_STRUCT_ALIGN : 1; + ptr[n] = (align[n] == 1) ? mp_alloc_fast_noalign(mp, len[n]) : mp_alloc_fast(mp, len[n]); + DBG(" -> (%p)", ptr[n]); + fill(ptr[n], len[n], n); + n++; + can_realloc = 1; + } + else if (n < max && (r -= 20) < 0) + { + len[n] = random_max(0x2000); + DBG("start(%u)", len[n]); + align[n] = random_max(2) ? CPU_STRUCT_ALIGN : 1; + ptr[n] = (align[n] == 1) ? mp_start_fast_noalign(mp, len[n]) : mp_start_fast(mp, len[n]); + DBG(" -> (%p)", ptr[n]); + fill(ptr[n], len[n], n); + n++; + can_realloc = 1; + goto grow; + } + else if (can_realloc && n && (r -= 10) < 0) + { + if (mp_open(mp, ptr[n - 1]) != len[n - 1]) + ASSERT(0); +grow: + { + uint k = n - 1; + for (uint i = random_max(4); i--; ) + { + uint l = len[k]; + len[k] = random_max(0x2000); + DBG("grow(%u)", len[k]); + ptr[k] = mp_grow(mp, len[k]); + DBG(" -> (%p)", ptr[k]); + check(ptr[k], MIN(l, len[k]), k, align[k]); + fill(ptr[k], len[k], k); + } + mp_end(mp, ptr[k] + len[k]); + } + } + else if (can_realloc && n && (r -= 20) < 0) + { + uint i = n - 1, l = len[i]; + DBG("realloc(%p, %u)", ptr[i], len[i]); + ptr[i] = mp_realloc(mp, ptr[i], len[i] = random_max(0x2000)); + DBG(" -> (%p, %u)", ptr[i], len[i]); + check(ptr[i], MIN(len[i], l), i, align[i]); + fill(ptr[i], len[i], i); + } + else if (m < max && (r -= 5) < 0) + { + DBG("push(%u)", m); + num[m] = n; + state[m++] = mp_push(mp); + can_realloc = 0; + } + else if (m && (r -= 2) < 0) + { + m--; + DBG("pop(%u)", m); + mp_pop(mp); + n = num[m]; + can_realloc = 0; + } + else if (m && (r -= 1) < 0) + { + uint i = random_max(m); + DBG("restore(%u)", i); + mp_restore(mp, state[i]); + n = num[m = i]; + can_realloc = 0; + } + else if (can_realloc && n && (r -= 5) < 0) + ASSERT(mp_size(mp, ptr[n - 1]) == len[n - 1]); + else + { + struct mempool_stats stats; + mp_stats(mp, &stats); + } + } + + mp_delete(mp); + return 0; +} + +#endif diff --git a/contrib/ucw/mempool.h b/contrib/ucw/mempool.h new file mode 100644 index 0000000..d9092a7 --- /dev/null +++ b/contrib/ucw/mempool.h @@ -0,0 +1,572 @@ +/* + * UCW Library -- Memory Pools + * + * (c) 1997--2015 Martin Mares <mj@ucw.cz> + * (c) 2007 Pavel Charvat <pchar@ucw.cz> + * SPDX-License-Identifier: LGPL-2.1-or-later + * Source: https://www.ucw.cz/libucw/ + */ + +#ifndef _UCW_POOLS_H +#define _UCW_POOLS_H + +#include "lib/defines.h" 
+#include <ucw/alloc.h> +#include <ucw/config.h> +#include <ucw/lib.h> +#include <string.h> + +#ifdef CONFIG_UCW_CLEAN_ABI +#define mp_alloc ucw_mp_alloc +#define mp_alloc_internal ucw_mp_alloc_internal +#define mp_alloc_noalign ucw_mp_alloc_noalign +#define mp_alloc_zero ucw_mp_alloc_zero +#define mp_delete ucw_mp_delete +#define mp_flush ucw_mp_flush +#define mp_grow_internal ucw_mp_grow_internal +#define mp_init ucw_mp_init +#define mp_memdup ucw_mp_memdup +#define mp_multicat ucw_mp_multicat +#define mp_new ucw_mp_new +#define mp_open ucw_mp_open +#define mp_pop ucw_mp_pop +#define mp_printf ucw_mp_printf +#define mp_printf_append ucw_mp_printf_append +#define mp_push ucw_mp_push +#define mp_realloc ucw_mp_realloc +#define mp_realloc_zero ucw_mp_realloc_zero +#define mp_restore ucw_mp_restore +#define mp_shrink ucw_mp_shrink +#define mp_spread_internal ucw_mp_spread_internal +#define mp_start ucw_mp_start +#define mp_start_internal ucw_mp_start_internal +#define mp_start_noalign ucw_mp_start_noalign +#define mp_stats ucw_mp_stats +#define mp_str_from_mem ucw_mp_str_from_mem +#define mp_strdup ucw_mp_strdup +#define mp_strjoin ucw_mp_strjoin +#define mp_total_size ucw_mp_total_size +#define mp_vprintf ucw_mp_vprintf +#define mp_vprintf_append ucw_mp_vprintf_append +#endif + +/*** + * [[defs]] + * Definitions + * ----------- + ***/ + +/** + * Memory pool state (see @mp_push(), ...). + * You should use this one as an opaque handle only, the insides are internal. + **/ +struct mempool_state { + size_t free[2]; + void *last[2]; + struct mempool_state *next; +}; + +/** + * Memory pool. + * You should use this one as an opaque handle only, the insides are internal. + **/ +struct mempool { + struct ucw_allocator allocator; // This must be the first element + struct mempool_state state; + void *unused, *last_big; + size_t chunk_size, threshold; + uint idx; + u64 total_size; +}; + +struct mempool_stats { /** Mempool statistics. See @mp_stats(). **/ + u64 total_size; /* Real allocated size in bytes */ + u64 used_size; /* Estimated size allocated from mempool to application */ + uint chain_count[3]; /* Number of allocated chunks in small/big/unused chains */ + u64 chain_size[3]; /* Size of allocated chunks in small/big/unused chains */ +}; + +/*** + * [[basic]] + * Basic manipulation + * ------------------ + ***/ + +/** + * Initialize a given mempool structure. + * @chunk_size must be in the interval `[1, SIZE_MAX / 2]`. + * It will allocate memory by this large chunks and take + * memory to satisfy requests from them. + * + * Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>. + **/ +KR_EXPORT +void mp_init(struct mempool *pool, size_t chunk_size); + +/** + * Allocate and initialize a new memory pool. + * See @mp_init() for @chunk_size limitations. + * + * The new mempool structure is allocated on the new mempool. + * + * Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>. + **/ +KR_EXPORT +struct mempool *mp_new(size_t chunk_size); + +/** + * Cleanup mempool initialized by mp_init or mp_new. + * Frees all the memory allocated by this mempool and, + * if created by @mp_new(), the @pool itself. + **/ +KR_EXPORT +void mp_delete(struct mempool *pool); + +/** + * Frees all data on a memory pool, but leaves it working. + * It can keep some of the chunks allocated to serve + * further allocation requests. Leaves the @pool alive, + * even if it was created with @mp_new(). 
+ **/ +KR_EXPORT +void mp_flush(struct mempool *pool); + +/** + * Compute some statistics for debug purposes. + * See the definition of the <<struct_mempool_stats,mempool_stats structure>>. + * This function scans the chunk list, so it can be slow. If you are interested + * in total memory consumption only, mp_total_size() is faster. + **/ +void mp_stats(struct mempool *pool, struct mempool_stats *stats); + +/** + * Return how many bytes were allocated by the pool, including unused parts + * of chunks. This function runs in constant time. + **/ +u64 mp_total_size(struct mempool *pool); + +/** + * Release unused chunks of memory reserved for further allocation + * requests, but stop if mp_total_size() would drop below @min_total_size. + **/ +void mp_shrink(struct mempool *pool, u64 min_total_size); + +/*** + * [[alloc]] + * Allocation routines + * ------------------- + ***/ + +/* For internal use only, do not call directly */ +void *mp_alloc_internal(struct mempool *pool, size_t size) LIKE_MALLOC; + +/** + * The function allocates new @size bytes on a given memory pool. + * If the @size is zero, the resulting pointer is undefined, + * but it may be safely reallocated or used as the parameter + * to other functions below. + * + * The resulting pointer is always aligned to a multiple of + * `CPU_STRUCT_ALIGN` bytes and this condition remains true also + * after future reallocations. + **/ +KR_EXPORT +void *mp_alloc(struct mempool *pool, size_t size); + +/** + * The same as @mp_alloc(), but the result may be unaligned. + **/ +void *mp_alloc_noalign(struct mempool *pool, size_t size); + +/** + * The same as @mp_alloc(), but fills the newly allocated memory with zeroes. + **/ +void *mp_alloc_zero(struct mempool *pool, size_t size); + +/** + * Inlined version of @mp_alloc(). + **/ +static inline void *mp_alloc_fast(struct mempool *pool, size_t size) +{ + size_t avail = pool->state.free[0] & ~(size_t)(CPU_STRUCT_ALIGN - 1); + if (size <= avail) + { + pool->state.free[0] = avail - size; + return (byte *)pool->state.last[0] - avail; + } + else + return mp_alloc_internal(pool, size); +} + +/** + * Inlined version of @mp_alloc_noalign(). + **/ +static inline void *mp_alloc_fast_noalign(struct mempool *pool, size_t size) +{ + if (size <= pool->state.free[0]) + { + void *ptr = (byte *)pool->state.last[0] - pool->state.free[0]; + pool->state.free[0] -= size; + return ptr; + } + else + return mp_alloc_internal(pool, size); +} + +/** + * Return a generic allocator representing the given mempool. + **/ +static inline struct ucw_allocator *mp_get_allocator(struct mempool *mp) +{ + return &mp->allocator; +} + +/*** + * [[gbuf]] + * Growing buffers + * --------------- + * + * You do not need to know, how a buffer will need to be large, + * you can grow it incrementally to needed size. You can grow only + * one buffer at a time on a given mempool. + * + * Similar functionality is provided by <<growbuf:,growing buffes>> module. + ***/ + +/* For internal use only, do not call directly */ +void *mp_start_internal(struct mempool *pool, size_t size) LIKE_MALLOC; +void *mp_grow_internal(struct mempool *pool, size_t size); +void *mp_spread_internal(struct mempool *pool, void *p, size_t size); + +static inline uint mp_idx(struct mempool *pool, void *ptr) +{ + return ptr == pool->last_big; +} + +/** + * Open a new growing buffer (at least @size bytes long). + * If the @size is zero, the resulting pointer is undefined, + * but it may be safely reallocated or used as the parameter + * to other functions below. 
+ * + * The resulting pointer is always aligned to a multiple of + * `CPU_STRUCT_ALIGN` bytes and this condition remains true also + * after future reallocations. There is an unaligned version as well. + * + * Keep in mind that you can't make any other pool allocations + * before you "close" the growing buffer with @mp_end(). + */ +void *mp_start(struct mempool *pool, size_t size); +void *mp_start_noalign(struct mempool *pool, size_t size); + +/** + * Inlined version of @mp_start(). + **/ +static inline void *mp_start_fast(struct mempool *pool, size_t size) +{ + size_t avail = pool->state.free[0] & ~(size_t)(CPU_STRUCT_ALIGN - 1); + if (size <= avail) + { + pool->idx = 0; + pool->state.free[0] = avail; + return (byte *)pool->state.last[0] - avail; + } + else + return mp_start_internal(pool, size); +} + +/** + * Inlined version of @mp_start_noalign(). + **/ +static inline void *mp_start_fast_noalign(struct mempool *pool, size_t size) +{ + if (size <= pool->state.free[0]) + { + pool->idx = 0; + return (byte *)pool->state.last[0] - pool->state.free[0]; + } + else + return mp_start_internal(pool, size); +} + +/** + * Return start pointer of the growing buffer allocated by latest @mp_start() or a similar function. + **/ +static inline void *mp_ptr(struct mempool *pool) +{ + return (byte *)pool->state.last[pool->idx] - pool->state.free[pool->idx]; +} + +/** + * Return the number of bytes available for extending the growing buffer. + * (Before a reallocation will be needed). + **/ +static inline size_t mp_avail(struct mempool *pool) +{ + return pool->state.free[pool->idx]; +} + +/** + * Grow the buffer allocated by @mp_start() to be at least @size bytes long + * (@size may be less than @mp_avail(), even zero). Reallocated buffer may + * change its starting position. The content will be unchanged to the minimum + * of the old and new sizes; newly allocated memory will be uninitialized. + * Multiple calls to mp_grow() have amortized linear cost wrt. the maximum value of @size. */ +static inline void *mp_grow(struct mempool *pool, size_t size) +{ + return (size <= mp_avail(pool)) ? mp_ptr(pool) : mp_grow_internal(pool, size); +} + +/** + * Grow the buffer by at least one byte -- equivalent to <<mp_grow(),`mp_grow`>>`(@pool, @mp_avail(pool) + 1)`. + **/ +static inline void *mp_expand(struct mempool *pool) +{ + return mp_grow_internal(pool, mp_avail(pool) + 1); +} + +/** + * Ensure that there is at least @size bytes free after @p, + * if not, reallocate and adjust @p. + **/ +static inline void *mp_spread(struct mempool *pool, void *p, size_t size) +{ + return (((size_t)((byte *)pool->state.last[pool->idx] - (byte *)p) >= size) ? p : mp_spread_internal(pool, p, size)); +} + +/** + * Append a character to the growing buffer. Called with @p pointing after + * the last byte in the buffer, returns a pointer after the last byte + * of the new (possibly reallocated) buffer. + **/ +static inline char *mp_append_char(struct mempool *pool, char *p, uint c) +{ + p = mp_spread(pool, p, 1); + *p++ = c; + return p; +} + +/** + * Append a memory block to the growing buffer. Called with @p pointing after + * the last byte in the buffer, returns a pointer after the last byte + * of the new (possibly reallocated) buffer. + **/ +static inline void *mp_append_block(struct mempool *pool, void *p, const void *block, size_t size) +{ + char *q = mp_spread(pool, p, size); + memcpy(q, block, size); + return q + size; +} + +/** + * Append a string to the growing buffer. 
Called with @p pointing after + * the last byte in the buffer, returns a pointer after the last byte + * of the new (possibly reallocated) buffer. + **/ +static inline void *mp_append_string(struct mempool *pool, void *p, const char *str) +{ + return mp_append_block(pool, p, str, strlen(str)); +} + +/** + * Close the growing buffer. The @end must point just behind the data, you want to keep + * allocated (so it can be in the interval `[@mp_ptr(@pool), @mp_ptr(@pool) + @mp_avail(@pool)]`). + * Returns a pointer to the beginning of the just closed block. + **/ +static inline void *mp_end(struct mempool *pool, void *end) +{ + void *p = mp_ptr(pool); + pool->state.free[pool->idx] = (byte *)pool->state.last[pool->idx] - (byte *)end; + return p; +} + +/** + * Close the growing buffer as a string. That is, append a zero byte and call mp_end(). + **/ +static inline char *mp_end_string(struct mempool *pool, void *end) +{ + end = mp_append_char(pool, end, 0); + return mp_end(pool, end); +} + +/** + * Return size in bytes of the last allocated memory block (with @mp_alloc() or @mp_end()). + **/ +static inline size_t mp_size(struct mempool *pool, void *ptr) +{ + uint idx = mp_idx(pool, ptr); + return ((byte *)pool->state.last[idx] - (byte *)ptr) - pool->state.free[idx]; +} + +/** + * Open the last memory block (allocated with @mp_alloc() or @mp_end()) + * for growing and return its size in bytes. The contents and the start pointer + * remain unchanged. Do not forget to call @mp_end() to close it. + **/ +size_t mp_open(struct mempool *pool, void *ptr); + +/** + * Inlined version of @mp_open(). + **/ +static inline size_t mp_open_fast(struct mempool *pool, void *ptr) +{ + pool->idx = mp_idx(pool, ptr); + size_t size = ((byte *)pool->state.last[pool->idx] - (byte *)ptr) - pool->state.free[pool->idx]; + pool->state.free[pool->idx] += size; + return size; +} + +/** + * Reallocate the last memory block (allocated with @mp_alloc() or @mp_end()) + * to the new @size. Behavior is similar to @mp_grow(), but the resulting + * block is closed. + **/ +void *mp_realloc(struct mempool *pool, void *ptr, size_t size); + +/** + * The same as @mp_realloc(), but fills the additional bytes (if any) with zeroes. + **/ +void *mp_realloc_zero(struct mempool *pool, void *ptr, size_t size); + +/** + * Inlined version of @mp_realloc(). + **/ +static inline void *mp_realloc_fast(struct mempool *pool, void *ptr, size_t size) +{ + mp_open_fast(pool, ptr); + ptr = mp_grow(pool, size); + mp_end(pool, (byte *)ptr + size); + return ptr; +} + +/*** + * [[store]] + * Storing and restoring state + * --------------------------- + * + * Mempools can remember history of what was allocated and return back + * in time. + ***/ + +/** + * Save the current state of a memory pool. + * Do not call this function with an opened growing buffer. + **/ +static inline void mp_save(struct mempool *pool, struct mempool_state *state) +{ + *state = pool->state; + pool->state.next = state; +} + +/** + * Save the current state to a newly allocated mempool_state structure. + * Do not call this function with an opened growing buffer. + **/ +struct mempool_state *mp_push(struct mempool *pool); + +/** + * Restore the state saved by @mp_save() or @mp_push() and free all + * data allocated after that point (including the state structure itself). + * You can't reallocate the last memory block from the saved state. + **/ +void mp_restore(struct mempool *pool, struct mempool_state *state); + +/** + * Inlined version of @mp_restore(). 
+ **/ +static inline void mp_restore_fast(struct mempool *pool, struct mempool_state *state) +{ + if (pool->state.last[0] != state->last[0] || pool->state.last[1] != state->last[1]) + mp_restore(pool, state); + else + { + pool->state = *state; + pool->last_big = &pool->last_big; + } +} + +/** + * Restore the state saved by the last call to @mp_push(). + * @mp_pop() and @mp_push() works as a stack so you can push more states safely. + **/ +void mp_pop(struct mempool *pool); + + +/*** + * [[string]] + * String operations + * ----------------- + ***/ + +char *mp_strdup(struct mempool *, const char *) LIKE_MALLOC; /** Makes a copy of a string on a mempool. Returns NULL for NULL string. **/ +void *mp_memdup(struct mempool *, const void *, size_t) LIKE_MALLOC; /** Makes a copy of a memory block on a mempool. **/ +/** + * Concatenates all passed strings. The last parameter must be NULL. + * This will concatenate two strings: + * + * char *message = mp_multicat(pool, "hello ", "world", NULL); + **/ +char *mp_multicat(struct mempool *, ...) LIKE_MALLOC SENTINEL_CHECK; +/** + * Concatenates two strings and stores result on @mp. + */ +static inline char *LIKE_MALLOC mp_strcat(struct mempool *mp, const char *x, const char *y) +{ + return mp_multicat(mp, x, y, NULL); +} +/** + * Join strings and place @sep between each two neighboring. + * @p is the mempool to provide memory, @a is array of strings and @n + * tells how many there is of them. + **/ +char *mp_strjoin(struct mempool *p, char **a, uint n, uint sep) LIKE_MALLOC; +/** + * Convert memory block to a string. Makes a copy of the given memory block + * in the mempool @p, adding an extra terminating zero byte at the end. + **/ +char *mp_str_from_mem(struct mempool *p, const void *mem, size_t len) LIKE_MALLOC; + + +/*** + * [[format]] + * Formatted output + * --------------- + ***/ + +/** + * printf() into a in-memory string, allocated on the memory pool. + **/ +KR_EXPORT +char *mp_printf(struct mempool *mp, const char *fmt, ...) FORMAT_CHECK(printf,2,3) LIKE_MALLOC; +/** + * Like @mp_printf(), but uses `va_list` for parameters. + **/ +char *mp_vprintf(struct mempool *mp, const char *fmt, va_list args) LIKE_MALLOC; +/** + * Like @mp_printf(), but it appends the data at the end of string + * pointed to by @ptr. The string is @mp_open()ed, so you have to + * provide something that can be. + * + * Returns pointer to the beginning of the string (the pointer may have + * changed due to reallocation). + * + * In some versions of LibUCW, this function was called mp_append_printf(). However, + * this name turned out to be confusing -- unlike other appending functions, this one is + * not called on an opened growing buffer. The old name will be preserved for backward + * compatibility for the time being. + **/ +KR_EXPORT +char *mp_printf_append(struct mempool *mp, char *ptr, const char *fmt, ...) FORMAT_CHECK(printf,3,4); +#define mp_append_printf mp_printf_append +/** + * Like @mp_printf_append(), but uses `va_list` for parameters. + * + * In some versions of LibUCW, this function was called mp_append_vprintf(). However, + * this name turned out to be confusing -- unlike other appending functions, this one is + * not called on an opened growing buffer. The old name will be preserved for backward + * compatibility for the time being. + **/ +char *mp_vprintf_append(struct mempool *mp, char *ptr, const char *fmt, va_list args); +#define mp_append_vprintf mp_vprintf_append + +#endif |
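The API added above is easiest to see in a small program. The following sketch is illustrative only (it is not part of this commit) and assumes the headers are reachable as <ucw/mempool.h>, matching the include style used in mempool-fmt.c; it exercises mp_new(), mp_alloc_zero(), mp_strdup(), mp_printf(), mp_printf_append() and mp_delete().

/* Illustrative sketch, not part of this commit.
 * Assumes the <ucw/...> headers are on the include path. */
#include <ucw/mempool.h>
#include <stdio.h>

struct record {
  char *name;
  int value;
};

int main(void)
{
  /* One pool; chunks of roughly 4 KiB are requested from malloc() as needed. */
  struct mempool *mp = mp_new(4096);

  /* Zero-filled, CPU_STRUCT_ALIGN-aligned block. */
  struct record *r = mp_alloc_zero(mp, sizeof(*r));
  r->name = mp_strdup(mp, "example");
  r->value = 42;

  /* printf() directly into pool memory; the append variant extends
   * the previously returned string. */
  char *msg = mp_printf(mp, "%s=%d", r->name, r->value);
  msg = mp_printf_append(mp, msg, " (%llu bytes pooled)",
                         (unsigned long long)mp_total_size(mp));
  puts(msg);

  /* Individual blocks are never freed; the whole pool goes at once. */
  mp_delete(mp);
  return 0;
}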
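The growing-buffer interface documented in mempool.h builds a value whose final size is not known in advance; only one growing buffer may be open per pool until it is closed with mp_end() or mp_end_string(). A minimal sketch under the same include-path assumption:

/* Illustrative sketch, not part of this commit. */
#include <ucw/mempool.h>
#include <stdio.h>

static char *join_words(struct mempool *mp, char **words, unsigned n)
{
  char *p = mp_start(mp, 1);            /* open the buffer */
  for (unsigned i = 0; i < n; i++)
    {
      if (i)
        p = mp_append_char(mp, p, ' ');
      p = mp_append_string(mp, p, words[i]);
    }
  return mp_end_string(mp, p);          /* NUL-terminate and close */
}

int main(void)
{
  struct mempool *mp = mp_new(64);
  char *words[] = { "pools", "grow", "on", "demand" };
  puts(join_words(mp, words, 4));
  mp_delete(mp);
  return 0;
}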
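The state-saving functions and the generic allocator view combine naturally for per-request scratch memory: mp_push() records the pool state on the pool itself, mp_pop() discards everything allocated after it, and mp_get_allocator() exposes the pool through the three ucw_allocator hooks (whose free hook is a no-op and whose realloc hook copies into a fresh block when growing, as implemented in mempool.c above). A hedged sketch:

/* Illustrative sketch, not part of this commit. */
#include <ucw/mempool.h>

static void handle_request(struct mempool *mp, int id)
{
  mp_push(mp);                          /* remember the current state */
  char *scratch = mp_printf(mp, "request #%d", id);
  (void)scratch;                        /* ... use the temporaries ... */
  mp_pop(mp);                           /* drop everything since mp_push() */
}

int main(void)
{
  struct mempool *mp = mp_new(4096);
  for (int i = 0; i < 3; i++)
    handle_request(mp, i);

  /* The same pool seen through the generic hooks from alloc.h. */
  struct ucw_allocator *a = mp_get_allocator(mp);
  int *v = a->alloc(a, 4 * sizeof(int));
  v = a->realloc(a, v, 4 * sizeof(int), 8 * sizeof(int));
  a->free(a, v);                        /* no-op for mempools */

  mp_delete(mp);
  return 0;
}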