author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-10 19:05:44 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-10 19:05:44 +0000
commit    b045529c40c83601909dca7b76a53498e9a70f33 (patch)
tree      88371572105933fd950676c07b3a12163a0c9de0 /src/contrib/ucw
parent    Initial commit. (diff)
Adding upstream version 3.3.4. (tag: upstream/3.3.4)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/contrib/ucw')
-rw-r--r--  src/contrib/ucw/LICENSE         1
-rw-r--r--  src/contrib/ucw/array-sort.h  195
-rw-r--r--  src/contrib/ucw/binsearch.h    50
-rw-r--r--  src/contrib/ucw/heap.c        153
-rw-r--r--  src/contrib/ucw/heap.h         46
-rw-r--r--  src/contrib/ucw/lists.c       264
-rw-r--r--  src/contrib/ucw/lists.h        74
-rw-r--r--  src/contrib/ucw/mempool.c     323
-rw-r--r--  src/contrib/ucw/mempool.h     124
9 files changed, 1230 insertions(+), 0 deletions(-)
diff --git a/src/contrib/ucw/LICENSE b/src/contrib/ucw/LICENSE
new file mode 100644
index 0000000..b463d57
--- /dev/null
+++ b/src/contrib/ucw/LICENSE
@@ -0,0 +1 @@
+../licenses/LGPL-2.0
\ No newline at end of file
diff --git a/src/contrib/ucw/array-sort.h b/src/contrib/ucw/array-sort.h
new file mode 100644
index 0000000..1ff1377
--- /dev/null
+++ b/src/contrib/ucw/array-sort.h
@@ -0,0 +1,195 @@
+/*
+ * UCW Library -- Universal Simple Array Sorter
+ *
+ * (c) 2003--2008 Martin Mares <mj@ucw.cz>
+ *
+ * This software may be freely distributed and used according to the terms
+ * of the GNU Lesser General Public License.
+ */
+
+#pragma once
+
+#include "contrib/macros.h"
+
+/*
+ * This is not a normal header file, it's a generator of sorting
+ * routines. Each time you include it with parameters set in the
+ * corresponding preprocessor macros, it generates an array sorter
+ * with the parameters given.
+ *
+ * You might wonder why we implement our own array sorter instead of
+ * using qsort(). The primary reason is that qsort() handles only
+ * contiguous arrays, whereas we need to sort array-like data structures
+ * where the only way to access elements is through an indexing macro.
+ * Besides that, this sorter is more than twice as fast.
+ *
+ * So much for advocacy; here are the parameters (those marked with [*]
+ * are mandatory):
+ *
+ * ASORT_PREFIX(x) [*] add a name prefix (used on all global names
+ * defined by the sorter)
+ * ASORT_KEY_TYPE [*] data type of a single array entry key
+ * ASORT_ELT(i) returns the key of i-th element; if this macro is not
+ * defined, the function gets a pointer to an array to be sorted
+ * ASORT_LT(x,y) x < y for ASORT_KEY_TYPE (default: "x<y")
+ * ASORT_SWAP(i,j) swap i-th and j-th element (default: assume _ELT
+ * is an l-value and swap just the keys)
+ * ASORT_THRESHOLD threshold for switching between quicksort and insertsort
+ * ASORT_EXTRA_ARGS extra arguments for the sort function (they are always
+ * visible in all the macros supplied above), starts with comma
+ *
+ * After including this file, a function ASORT_PREFIX(sort)(unsigned array_size)
+ * [if ASORT_ELT is defined] or ASORT_PREFIX(sort)(ASORT_KEY_TYPE *array,
+ * unsigned array_size) [otherwise] is defined and all parameter macros are
+ * automatically undef'd.
+ */
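+
+/*
+ * Usage sketch (editorial example; the intarr_ prefix is made up):
+ *
+ *   #define ASORT_PREFIX(x) intarr_##x
+ *   #define ASORT_KEY_TYPE int
+ *   #include "contrib/ucw/array-sort.h"
+ *   // defines: static void intarr_sort(int *array, unsigned array_size);
+ *
+ *   int v[3] = { 3, 1, 2 };
+ *   intarr_sort(v, 3);   // v is now { 1, 2, 3 }
+ */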
+
+#ifndef ASORT_LT
+#define ASORT_LT(x,y) ((x) < (y))
+#endif
+
+#ifndef ASORT_SWAP
+#define ASORT_SWAP(i,j) do { ASORT_KEY_TYPE tmp = ASORT_ELT(i); ASORT_ELT(i)=ASORT_ELT(j); ASORT_ELT(j)=tmp; } while (0)
+#endif
+
+#ifndef ASORT_THRESHOLD
+#define ASORT_THRESHOLD 8 /* Guesswork and experimentation */
+#endif
+
+#ifndef ASORT_EXTRA_ARGS
+#define ASORT_EXTRA_ARGS
+#endif
+
+#ifndef ASORT_ELT
+#define ASORT_ARRAY_ARG ASORT_KEY_TYPE *array,
+#define ASORT_ELT(i) array[i]
+#else
+#define ASORT_ARRAY_ARG
+#endif
+
+/**
+ * The generated sorting function. If the `ASORT_ELT` macro is not provided,
+ * @ASORT_ARRAY_ARG expands to `ASORT_KEY_TYPE *array`, the array to be
+ * sorted. If the macro is provided, this parameter is omitted; in that case
+ * you can sort global variables or pass your structure via @ASORT_EXTRA_ARGS.
+ **/
+static void ASORT_PREFIX(sort)(ASORT_ARRAY_ARG unsigned array_size ASORT_EXTRA_ARGS)
+{
+ struct stk { int l, r; } stack[8*sizeof(unsigned)];
+ int l, r, left, right, m;
+ unsigned sp = 0;
+ ASORT_KEY_TYPE pivot;
+
+ if (array_size <= 1)
+ return;
+
+ /* QuickSort with optimizations à la Sedgewick, but stop at ASORT_THRESHOLD */
+
+ left = 0;
+ right = array_size - 1;
+ for(;;)
+ {
+ l = left;
+ r = right;
+ m = (l+r)/2;
+ if (ASORT_LT(ASORT_ELT(m), ASORT_ELT(l)))
+ ASORT_SWAP(l,m);
+ if (ASORT_LT(ASORT_ELT(r), ASORT_ELT(m)))
+ {
+ ASORT_SWAP(m,r);
+ if (ASORT_LT(ASORT_ELT(m), ASORT_ELT(l)))
+ ASORT_SWAP(l,m);
+ }
+ pivot = ASORT_ELT(m);
+ do
+ {
+ while (ASORT_LT(ASORT_ELT(l), pivot))
+ l++;
+ while (ASORT_LT(pivot, ASORT_ELT(r)))
+ r--;
+ if (l < r)
+ {
+ ASORT_SWAP(l,r);
+ l++;
+ r--;
+ }
+ else if (l == r)
+ {
+ l++;
+ r--;
+ }
+ }
+ while (l <= r);
+ if ((r - left) >= ASORT_THRESHOLD && (right - l) >= ASORT_THRESHOLD)
+ {
+ /* Both partitions ok => push the larger one */
+ if ((r - left) > (right - l))
+ {
+ stack[sp].l = left;
+ stack[sp].r = r;
+ left = l;
+ }
+ else
+ {
+ stack[sp].l = l;
+ stack[sp].r = right;
+ right = r;
+ }
+ sp++;
+ }
+ else if ((r - left) >= ASORT_THRESHOLD)
+ {
+ /* Left partition OK, right undersize */
+ right = r;
+ }
+ else if ((right - l) >= ASORT_THRESHOLD)
+ {
+ /* Right partition OK, left undersize */
+ left = l;
+ }
+ else
+ {
+ /* Both partitions undersize => pop */
+ if (!sp)
+ break;
+ sp--;
+ left = stack[sp].l;
+ right = stack[sp].r;
+ }
+ }
+
+ /*
+ * We have a partially sorted array, finish by insertsort. Inspired
+ * by qsort() in GNU libc.
+ */
+
+ /* Find minimal element which will serve as a barrier */
+ r = MIN(array_size, ASORT_THRESHOLD);
+ m = 0;
+ for (l=1; l<r; l++)
+ if (ASORT_LT(ASORT_ELT(l),ASORT_ELT(m)))
+ m = l;
+ ASORT_SWAP(0,m);
+
+ /* Insertion sort */
+ for (m=1; m<(int)array_size; m++)
+ {
+ l=m;
+ while (ASORT_LT(ASORT_ELT(m),ASORT_ELT(l-1)))
+ l--;
+ while (l < m)
+ {
+ ASORT_SWAP(l,m);
+ l++;
+ }
+ }
+}
+
+#undef ASORT_PREFIX
+#undef ASORT_KEY_TYPE
+#undef ASORT_ELT
+#undef ASORT_LT
+#undef ASORT_SWAP
+#undef ASORT_THRESHOLD
+#undef ASORT_EXTRA_ARGS
+#undef ASORT_ARRAY_ARG
diff --git a/src/contrib/ucw/binsearch.h b/src/contrib/ucw/binsearch.h
new file mode 100644
index 0000000..b791d39
--- /dev/null
+++ b/src/contrib/ucw/binsearch.h
@@ -0,0 +1,50 @@
+/*
+ * UCW Library -- Generic Binary Search
+ *
+ * (c) 2005 Martin Mares <mj@ucw.cz>
+ *
+ * This software may be freely distributed and used according to the terms
+ * of the GNU Lesser General Public License.
+ */
+
+#pragma once
+
+/***
+ * [[defs]]
+ * Definitions
+ * -----------
+ ***/
+
+/**
+ * Find the first element not lower than \p x in the sorted array \p ary of \p N elements (non-decreasing order).
+ * Returns the index of the found element or \p N if none exists. Uses `ary_lt_x(ary,i,x)` to compare the i-th element with \p x.
+ * The time complexity is `O(log(N))`.
+ **/
+#define BIN_SEARCH_FIRST_GE_CMP(ary, N, ary_lt_x, x, ...) ({ \
+ unsigned l = 0, r = (N); \
+ while (l < r) \
+ { \
+ unsigned m = (l+r)/2; \
+ if (ary_lt_x(ary, m, x, ##__VA_ARGS__)) \
+ l = m+1; \
+ else \
+ r = m; \
+ } \
+ l; \
+})
+
+/**
+ * The default comparison macro for \ref BIN_SEARCH_FIRST_GE_CMP().
+ **/
+#define ARY_LT_NUM(ary,i,x) (ary)[i] < (x)
+
+/**
+ * Same as \ref BIN_SEARCH_FIRST_GE_CMP(), but uses the default `<` operator for comparisons.
+ **/
+#define BIN_SEARCH_FIRST_GE(ary,N,x) BIN_SEARCH_FIRST_GE_CMP(ary,N,ARY_LT_NUM,x)
+
+/**
+ * Search the sorted array \p ary of \p N elements (non-decreasing) for the first occurrence of \p x.
+ * Returns the index or -1 if no such element exists. Uses the `<` operator for comparisons.
+ **/
+#define BIN_SEARCH_EQ(ary,N,x) ({ int i = BIN_SEARCH_FIRST_GE(ary,N,x); if (i >= (N) || (ary)[i] != (x)) i=-1; i; })
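+
+/*
+ * Usage sketch (editorial example): with int a[] = { 1, 3, 3, 7 }:
+ *
+ *   BIN_SEARCH_FIRST_GE(a, 4, 3)  // == 1, the first index with a[i] >= 3
+ *   BIN_SEARCH_EQ(a, 4, 3)        // == 1
+ *   BIN_SEARCH_EQ(a, 4, 5)        // == -1, 5 is not present
+ */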
diff --git a/src/contrib/ucw/heap.c b/src/contrib/ucw/heap.c
new file mode 100644
index 0000000..7d74aeb
--- /dev/null
+++ b/src/contrib/ucw/heap.c
@@ -0,0 +1,153 @@
+/*
+ * Binary heap
+ *
+ * (c) 2012 Ondrej Filip <feela@network.cz>
+ *
+ * This software may be freely distributed and used according to the terms
+ * of the GNU Lesser General Public License.
+ */
+
+/***
+ * Introduction
+ * ------------
+ *
+ * A binary heap is a simple data structure which supports, among other things,
+ * efficient insertion, deletion, and access to the minimal inserted item.
+ * Note that because of the simplicity of heaps, this is a direct implementation
+ * rather than a <<generic:,macro generator>> as for several other data
+ * structures in libucw.
+ *
+ * A heap is represented by a number of elements and by an array of values.
+ * Beware that we index this array from one, not from zero as standard C
+ * arrays do. Here the state lives in a struct heap with these fields:
+ *
+ * - @num - the number of elements
+ * - @data - an array of heap_val_t pointers; the heap occupies `data[1] .. data[num]`; `data[0]` is unused
+ *
+ * A valid heap must satisfy these rules (in terms of the user-supplied @cmp):
+ *
+ * - `num >= 0`
+ * - `cmp(data[i], data[i / 2]) >= 0` for each `i` in `[2, num]`
+ *
+ * The first element `data[1]` is always less than or equal to all other elements.
+ ***/
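+
+/*
+ * Illustration (editorial example, using plain integer ordering): num = 5 and
+ * values { 1, 3, 2, 7, 4 } in data[1..5] form a valid heap: 3 >= 1, 2 >= 1,
+ * 7 >= 3 and 4 >= 3, i.e. each element is >= its parent at index i/2.
+ */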
+
+#include <string.h>
+#include <stdlib.h>
+#include "contrib/ucw/heap.h"
+
+static inline void heap_swap(heap_val_t **e1, heap_val_t **e2)
+{
+ if (e1 == e2) return; /* Stack tmp should be faster than tmpelem. */
+ heap_val_t *tmp = *e1; /* Even faster than 2-XOR nowadays. */
+ *e1 = *e2;
+ *e2 = tmp;
+ int pos = (*e1)->pos;
+ (*e1)->pos = (*e2)->pos;
+ (*e2)->pos = pos;
+}
+
+int heap_init(struct heap *h, int (*cmp)(void *, void *), int init_size)
+{
+ int isize = init_size ? init_size : INITIAL_HEAP_SIZE;
+
+ h->num = 0;
+ h->max_size = isize;
+ h->cmp = cmp;
+ h->data = malloc((isize + 1) * sizeof(heap_val_t*)); /* +1: slot 0 is unused (1-based indexing). */
+
+ return h->data ? 1 : 0;
+}
+
+void heap_deinit(struct heap *h)
+{
+ free(h->data);
+ memset(h, 0, sizeof(*h));
+}
+
+static inline void _heap_bubble_down(struct heap *h, int e)
+{
+ int e1;
+ for (;;) {
+ e1 = 2 * e;
+ if (e1 > h->num) break;
+ if ((h->cmp(*HELEMENT(h, e), *HELEMENT(h, e1)) < 0) &&
+ (e1 == h->num || (h->cmp(*HELEMENT(h, e), *HELEMENT(h, e1 + 1)) < 0))) break;
+ if ((e1 != h->num) && (h->cmp(*HELEMENT(h, e1 + 1), *HELEMENT(h, e1)) < 0)) e1++;
+ heap_swap(HELEMENT(h, e), HELEMENT(h, e1));
+ e = e1;
+ }
+}
+
+static inline void _heap_bubble_up(struct heap *h, int e)
+{
+ int e1;
+ while (e > 1) {
+ e1 = e / 2;
+ if (h->cmp(*HELEMENT(h, e1), *HELEMENT(h, e)) < 0) break;
+ heap_swap(HELEMENT(h, e), HELEMENT(h, e1));
+ e = e1;
+ }
+}
+
+void heap_replace(struct heap *h, int pos, heap_val_t *e)
+{
+ *HELEMENT(h, pos) = e;
+ e->pos = pos;
+
+ if (pos == 1 || h->cmp(*HELEMENT(h, pos / 2), e) < 0) {
+ _heap_bubble_down(h, pos);
+ } else {
+ _heap_bubble_up(h, pos);
+ }
+}
+
+void heap_delmin(struct heap *h)
+{
+ if (h->num == 0) return;
+ if (h->num > 1) {
+ heap_swap(HHEAD(h), HELEMENT(h, h->num));
+ }
+ (*HELEMENT(h, h->num))->pos = 0;
+ h->num--;
+ _heap_bubble_down(h, 1);
+}
+
+int heap_insert(struct heap *h, heap_val_t *e)
+{
+ if (h->num == h->max_size) {
+ int max_size = h->max_size * HEAP_INCREASE_STEP;
+ heap_val_t **data = realloc(h->data, (max_size + 1) * sizeof(heap_val_t*));
+ if (!data) {
+ return 0; /* Growth failed; the old array stays intact. */
+ }
+ h->data = data;
+ h->max_size = max_size;
+ }
+
+ h->num++;
+ *HELEMENT(h, h->num) = e;
+ e->pos = h->num;
+ _heap_bubble_up(h, h->num);
+ return 1;
+}
+
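+/* Returns elm's current 1-based position in the heap, or 0 if it has been
+ * removed (heap_delmin/heap_delete reset the position). The heap argument
+ * is unused; the position is tracked inside elm itself. */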
+int heap_find(struct heap *h, heap_val_t *elm)
+{
+ return ((struct heap_val *) elm)->pos;
+}
+
+void heap_delete(struct heap *h, int e)
+{
+ heap_swap(HELEMENT(h, e), HELEMENT(h, h->num));
+ (*HELEMENT(h, h->num))->pos = 0;
+ h->num--;
+ if (h->cmp(*HELEMENT(h, e), *HELEMENT(h, h->num + 1)) < 0) {
+ _heap_bubble_up(h, e);
+ } else {
+ _heap_bubble_down(h, e);
+ }
+
+ if ((h->num > INITIAL_HEAP_SIZE) && (h->num < h->max_size / HEAP_DECREASE_THRESHOLD)) {
+ int max_size = h->max_size / HEAP_INCREASE_STEP;
+ heap_val_t **data = realloc(h->data, (max_size + 1) * sizeof(heap_val_t*));
+ if (data != NULL) { /* On failure, simply keep the larger array. */
+ h->data = data;
+ h->max_size = max_size;
+ }
+ }
+}
diff --git a/src/contrib/ucw/heap.h b/src/contrib/ucw/heap.h
new file mode 100644
index 0000000..f85bb82
--- /dev/null
+++ b/src/contrib/ucw/heap.h
@@ -0,0 +1,46 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+struct heap_val {
+ int pos;
+};
+
+typedef struct heap_val heap_val_t;
+
+struct heap {
+ int num; /* Number of elements */
+ int max_size; /* Size of allocated memory */
+ int (*cmp)(void *, void *);
+ heap_val_t **data;
+}; /* Array follows */
+
+#define INITIAL_HEAP_SIZE 512 /* initial heap size */
+#define HEAP_INCREASE_STEP 2 /* multiplier for each inflation, keep conservative */
+#define HEAP_DECREASE_THRESHOLD 2 /* threshold for deflation, keep conservative */
+#define HELEMENT(h,num) ((h)->data + (num))
+#define HHEAD(h) HELEMENT((h), 1)
+#define EMPTY_HEAP(h) ((h)->num == 0)
+
+int heap_init(struct heap *, int (*cmp)(void *, void *), int);
+void heap_deinit(struct heap *);
+
+void heap_delmin(struct heap *);
+int heap_insert(struct heap *, heap_val_t *);
+int heap_find(struct heap *, heap_val_t *);
+void heap_delete(struct heap *, int);
+void heap_replace(struct heap *, int, heap_val_t *);
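+
+/*
+ * Usage sketch (editorial example; struct task and task_cmp are made up).
+ * Elements embed heap_val_t as their first member, so the stored
+ * heap_val_t pointers can be cast back to the enclosing type:
+ *
+ *   struct task { heap_val_t hv; int prio; };
+ *
+ *   static int task_cmp(void *a, void *b)
+ *   {
+ *       return ((struct task *)a)->prio - ((struct task *)b)->prio;
+ *   }
+ *
+ *   struct heap h;
+ *   heap_init(&h, task_cmp, 0);              // 0 selects INITIAL_HEAP_SIZE
+ *   heap_insert(&h, &t->hv);                 // t is a struct task *
+ *   struct task *min = (struct task *)*HHEAD(&h);
+ *   heap_delmin(&h);
+ *   heap_deinit(&h);
+ */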
diff --git a/src/contrib/ucw/lists.c b/src/contrib/ucw/lists.c
new file mode 100644
index 0000000..01af28f
--- /dev/null
+++ b/src/contrib/ucw/lists.c
@@ -0,0 +1,264 @@
+/*
+ * BIRD Library -- Linked Lists
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ * (c) 2015, 2020-2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+/**
+ * DOC: Linked lists
+ *
+ * The BIRD library provides a set of functions for operating on linked
+ * lists. The lists are internally represented as standard doubly linked
+ * lists with synthetic head and tail which makes all the basic operations
+ * run in constant time and contain no extra end-of-list checks. Each list
+ * is described by a &list structure, nodes can have any format as long
+ * as they start with a &node structure. If you want your nodes to belong
+ * to multiple lists at once, you can embed multiple &node structures in them
+ * and use the SKIP_BACK() macro to calculate a pointer to the start of the
+ * structure from a &node pointer, but beware of obscurity.
+ *
+ * There also exist safe linked lists (&slist, &snode and all functions
+ * being prefixed with |s_|) which support asynchronous walking very
+ * similar to that used in the &fib structure.
+ */
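+
+/*
+ * Usage sketch (editorial example; item_t and use() are made up). The
+ * node_t must be the first member so that a node pointer is also a pointer
+ * to the enclosing item:
+ *
+ *   typedef struct item { node_t n; int value; } item_t;
+ *
+ *   list_t l;
+ *   init_list(&l);
+ *   add_tail(&l, &it->n);          // it is an item_t *
+ *
+ *   item_t *i;
+ *   WALK_LIST(i, l) {
+ *       use(i->value);             // use() stands in for real work
+ *   }
+ */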
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "contrib/ucw/lists.h"
+#include "contrib/mempattern.h"
+
+/**
+ * add_tail - append a node to a list
+ * \p l: linked list
+ * \p n: list node
+ *
+ * add_tail() takes a node \p n and appends it at the end of the list \p l.
+ */
+void
+add_tail(list_t *l, node_t *n)
+{
+ node_t *z = &l->tail;
+
+ n->next = z;
+ n->prev = z->prev;
+ z->prev->next = n;
+ z->prev = n;
+ assert(z->next == NULL);
+}
+
+/**
+ * add_head - prepend a node to a list
+ * \p l: linked list
+ * \p n: list node
+ *
+ * add_head() takes a node \p n and prepends it at the start of the list \p l.
+ */
+void
+add_head(list_t *l, node_t *n)
+{
+ node_t *z = &l->head;
+
+ n->next = z->next;
+ n->prev = z;
+ z->next->prev = n;
+ z->next = n;
+ assert(z->prev == NULL);
+}
+
+/**
+ * insert_node - insert a node to a list
+ * \p n: a new list node
+ * \p after: a node of a list
+ *
+ * Inserts a node \p n to a linked list after an already inserted
+ * node \p after.
+ */
+void
+insert_node(node_t *n, node_t *after)
+{
+ node_t *z = after->next;
+
+ n->next = z;
+ n->prev = after;
+ after->next = n;
+ z->prev = n;
+}
+
+/**
+ * rem_node - remove a node from a list
+ * \p n: node to be removed
+ *
+ * Removes a node \p n from the list it's linked in.
+ */
+void
+rem_node(node_t *n)
+{
+ node_t *z = n->prev;
+ node_t *x = n->next;
+
+ z->next = x;
+ x->prev = z;
+ n->prev = 0;
+ n->next = 0;
+}
+
+/**
+ * init_list - create an empty list
+ * \p l: list
+ *
+ * init_list() takes a &list structure and initializes its
+ * fields, so that it represents an empty list.
+ */
+void
+init_list(list_t *l)
+{
+ l->head.next = &l->tail;
+ l->head.prev = NULL;
+ l->tail.next = NULL;
+ l->tail.prev = &l->head;
+}
+
+/**
+ * add_tail_list - concatenate two lists
+ * \p to: destination list
+ * \p l: source list
+ *
+ * This function appends all elements of the list \p l to
+ * the list \p to in constant time.
+ */
+void
+add_tail_list(list_t *to, list_t *l)
+{
+ node_t *p = to->tail.prev;
+ node_t *q = l->head.next;
+
+ p->next = q;
+ q->prev = p;
+ to->tail.prev = l->tail.prev;
+}
+
+/**
+ * list_dup - duplicate a list
+ * \p dst: destination list
+ * \p src: source list
+ * \p itemsz: size of one item, in bytes
+ *
+ * This function duplicates all elements of the list \p src into
+ * the list \p dst in linear time.
+ *
+ * It only works with a homogeneous item size.
+ */
+void list_dup(list_t *dst, list_t *src, size_t itemsz)
+{
+ node_t *n;
+ WALK_LIST(n, *src) {
+ node_t *i = malloc(itemsz);
+ if (i == NULL) {
+ continue; /* Skip the node on allocation failure. */
+ }
+ memcpy(i, n, itemsz);
+ add_tail(dst, i);
+ }
+}
+
+/**
+ * list_size - gets number of nodes
+ * \p l: list
+ *
+ * This function counts nodes in list \p l and returns this number.
+ */
+size_t list_size(const list_t *l)
+{
+ size_t count = 0;
+
+ node_t *n;
+ WALK_LIST(n, *l) {
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * fix_list - fix head/tail pointers after the list has been memmove'd
+ * \p l: list
+ *
+ * WARNING: must not be called on an empty list
+ */
+void fix_list(list_t *l)
+{
+ node_t *n = HEAD(*l);
+ assert(n->next != NULL);
+ n->prev = &l->head;
+
+ n = TAIL(*l);
+ assert(n->prev != NULL);
+ n->next = &l->tail;
+}
+
+/**
+ * ptrlist_add - add pointer to pointer list
+ * \p to: destination list
+ * \p val: added pointer
+ * \p mm: memory context
+ */
+ptrnode_t *ptrlist_add(list_t *to, void *val, knot_mm_t *mm)
+{
+ ptrnode_t *node = mm_alloc(mm, sizeof(ptrnode_t));
+ if (node == NULL) {
+ return NULL;
+ }
+ node->d = val;
+ add_tail(to, &node->n);
+ return node;
+}
+
+/**
+ * ptrlist_free - free all nodes in pointer list
+ * \p list: list whose nodes are freed
+ * \p mm: memory context
+ */
+void ptrlist_free(list_t *list, knot_mm_t *mm)
+{
+ node_t *n, *nxt;
+ WALK_LIST_DELSAFE(n, nxt, *list) {
+ mm_free(mm, n);
+ }
+ init_list(list);
+}
+
+/**
+ * ptrlist_rem - remove a pointer node from its list and free it
+ * \p node: node to remove
+ * \p mm: memory context
+ */
+void ptrlist_rem(ptrnode_t *node, knot_mm_t *mm)
+{
+ rem_node(&node->n);
+ mm_free(mm, node);
+}
+
+/**
+ * ptrlist_deep_free - free all nodes including the referenced data
+ * \p l: list
+ * \p mm: memory context
+ */
+void ptrlist_deep_free(list_t *l, knot_mm_t *mm)
+{
+ ptrnode_t *n;
+ WALK_LIST(n, *l) {
+ mm_free(mm, n->d);
+ }
+ ptrlist_free(l, mm);
+}
+
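+/**
+ * ptrlist_free_custom - free all nodes, releasing each node's data via a callback
+ * \p l: list
+ * \p mm: memory context
+ * \p free_cb: callback invoked on each node's data pointer
+ */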
+void ptrlist_free_custom(list_t *l, knot_mm_t *mm, ptrlist_free_cb free_cb)
+{
+ ptrnode_t *n;
+ WALK_LIST(n, *l) {
+ free_cb(n->d);
+ }
+ ptrlist_free(l, mm);
+}
diff --git a/src/contrib/ucw/lists.h b/src/contrib/ucw/lists.h
new file mode 100644
index 0000000..1a3ca95
--- /dev/null
+++ b/src/contrib/ucw/lists.h
@@ -0,0 +1,74 @@
+/*
+ * BIRD Library -- Linked Lists
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ * (c) 2015, 2020-2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#pragma once
+
+#include <string.h>
+#include "libknot/mm_ctx.h"
+
+typedef struct node {
+ struct node *next, *prev;
+} node_t;
+
+typedef struct list {
+ node_t head, tail;
+} list_t;
+
+#define NODE (node_t *)
+#define HEAD(list) ((void *)((list).head.next))
+#define TAIL(list) ((void *)((list).tail.prev))
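+/* Iteration helpers (editorial note): n is a pointer of the caller's node
+   type; the DELSAFE variants tolerate removing, even freeing, the current
+   node during the walk. */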
+#define WALK_LIST(n,list) for(n=HEAD(list);(NODE (n))->next; \
+ n=(void *)((NODE (n))->next))
+#define WALK_LIST_DELSAFE(n,nxt,list) \
+ for(n=HEAD(list); (nxt=(void *)((NODE (n))->next)); n=(void *) nxt)
+/* WALK_LIST_FIRST supposes that called code removes each processed node */
+#define WALK_LIST_FIRST(n,list) \
+ while(n=HEAD(list), (NODE (n))->next)
+#define WALK_LIST_BACKWARDS(n,list) for(n=TAIL(list);(NODE (n))->prev; \
+ n=(void *)((NODE (n))->prev))
+#define WALK_LIST_BACKWARDS_DELSAFE(n,prv,list) \
+ for(n=TAIL(list); prv=(void *)((NODE (n))->prev); n=(void *) prv)
+
+#define EMPTY_LIST(list) (!(NODE HEAD(list))->next)
+
+/*! \brief Free every node in the list. */
+#define WALK_LIST_FREE(list) \
+ do { \
+ node_t *n=0,*nxt=0; \
+ WALK_LIST_DELSAFE(n,nxt,list) { \
+ free(n); \
+ } \
+ init_list(&list); \
+ } while(0)
+
+void add_tail(list_t *, node_t *);
+void add_head(list_t *, node_t *);
+void rem_node(node_t *);
+void add_tail_list(list_t *, list_t *);
+void init_list(list_t *);
+void insert_node(node_t *, node_t *);
+void list_dup(list_t *dst, list_t *src, size_t itemsz);
+size_t list_size(const list_t *);
+void fix_list(list_t *);
+
+/*!
+ * \brief Generic pointer list implementation.
+ */
+typedef struct ptrnode {
+ node_t n;
+ void *d;
+} ptrnode_t;
+
+ptrnode_t *ptrlist_add(list_t *, void *, knot_mm_t *);
+void ptrlist_free(list_t *, knot_mm_t *);
+void ptrlist_rem(ptrnode_t *node, knot_mm_t *mm);
+void ptrlist_deep_free(list_t *, knot_mm_t *);
+
+typedef void (*ptrlist_free_cb)(void *);
+void ptrlist_free_custom(list_t *l, knot_mm_t *mm, ptrlist_free_cb free_cb);
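+
+/*
+ * Usage sketch (editorial example; assumes a NULL knot_mm_t falls back to
+ * malloc/free, as in contrib/mempattern):
+ *
+ *   list_t l;
+ *   init_list(&l);
+ *   ptrlist_add(&l, strdup("example"), NULL);
+ *   ptrlist_deep_free(&l, NULL);   // frees both the nodes and the strings
+ */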
diff --git a/src/contrib/ucw/mempool.c b/src/contrib/ucw/mempool.c
new file mode 100644
index 0000000..8e835c1
--- /dev/null
+++ b/src/contrib/ucw/mempool.c
@@ -0,0 +1,323 @@
+/*
+ * UCW Library -- Memory Pools (One-Time Allocation)
+ *
+ * (c) 1997--2001 Martin Mares <mj@ucw.cz>
+ * (c) 2007 Pavel Charvat <pchar@ucw.cz>
+ * (c) 2015, 2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+ *
+ * This software may be freely distributed and used according to the terms
+ * of the GNU Lesser General Public License.
+ */
+
+#undef LOCAL_DEBUG
+
+#include <string.h>
+#include <strings.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include "contrib/asan.h"
+#include "contrib/macros.h"
+#include "contrib/ucw/mempool.h"
+
+/** \todo This shouldn't be precalculated, but computed on load. */
+#define CPU_PAGE_SIZE 4096
+
+/** Align an integer \p s to the nearest higher multiple of \p a (which should be a power of two) **/
+#define ALIGN_TO(s, a) (((s)+(a)-1)&~((a)-1))
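+/* E.g. ALIGN_TO(13, 8) == 16 and ALIGN_TO(16, 8) == 16 (editorial example). */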
+#define MP_CHUNK_TAIL ALIGN_TO(sizeof(struct mempool_chunk), CPU_STRUCT_ALIGN)
+#define MP_SIZE_MAX (~0U - MP_CHUNK_TAIL - CPU_PAGE_SIZE)
+#define DBG(s, ...)
+
+/** \note Imported MMAP backend from bigalloc.c */
+#define CONFIG_UCW_POOL_IS_MMAP
+#ifdef CONFIG_UCW_POOL_IS_MMAP
+#include <sys/mman.h>
+static void *
+page_alloc(uint64_t len)
+{
+ if (!len) {
+ return NULL;
+ }
+ if (len > SIZE_MAX) {
+ return NULL;
+ }
+ assert(!(len & (CPU_PAGE_SIZE-1)));
+ uint8_t *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (p == (uint8_t*) MAP_FAILED) {
+ return NULL;
+ }
+ return p;
+}
+
+static void
+page_free(void *start, uint64_t len)
+{
+ assert(!(len & (CPU_PAGE_SIZE-1)));
+ assert(!((uintptr_t) start & (CPU_PAGE_SIZE-1)));
+ munmap(start, len);
+}
+#endif
+
+struct mempool_chunk {
+ struct mempool_chunk *next;
+ unsigned size;
+};
+
+static unsigned
+mp_align_size(unsigned size)
+{
+#ifdef CONFIG_UCW_POOL_IS_MMAP
+ return ALIGN_TO(size + MP_CHUNK_TAIL, CPU_PAGE_SIZE) - MP_CHUNK_TAIL;
+#else
+ return ALIGN_TO(size, CPU_STRUCT_ALIGN);
+#endif
+}
+
+void
+mp_init(struct mempool *pool, unsigned chunk_size)
+{
+ chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
+ *pool = (struct mempool) {
+ .chunk_size = chunk_size,
+ .threshold = chunk_size >> 1,
+ .last_big = &pool->last_big
+ };
+}
+
+static void *
+mp_new_big_chunk(unsigned size)
+{
+ uint8_t *data = malloc(size + MP_CHUNK_TAIL);
+ if (!data) {
+ return NULL;
+ }
+ ASAN_POISON_MEMORY_REGION(data, size);
+ struct mempool_chunk *chunk = (struct mempool_chunk *)(data + size);
+ chunk->size = size;
+ return chunk;
+}
+
+static void
+mp_free_big_chunk(struct mempool_chunk *chunk)
+{
+ void *ptr = (uint8_t *)chunk - chunk->size;
+ ASAN_UNPOISON_MEMORY_REGION(ptr, chunk->size);
+ free(ptr);
+}
+
+static void *
+mp_new_chunk(unsigned size)
+{
+#ifdef CONFIG_UCW_POOL_IS_MMAP
+ uint8_t *data = page_alloc(size + MP_CHUNK_TAIL);
+ if (!data) {
+ return NULL;
+ }
+ ASAN_POISON_MEMORY_REGION(data, size);
+ struct mempool_chunk *chunk = (struct mempool_chunk *)(data + size);
+ chunk->size = size;
+ return chunk;
+#else
+ return mp_new_big_chunk(size);
+#endif
+}
+
+static void
+mp_free_chunk(struct mempool_chunk *chunk)
+{
+#ifdef CONFIG_UCW_POOL_IS_MMAP
+ uint8_t *data = (uint8_t *)chunk - chunk->size;
+ ASAN_UNPOISON_MEMORY_REGION(data, chunk->size);
+ page_free(data, chunk->size + MP_CHUNK_TAIL);
+#else
+ mp_free_big_chunk(chunk);
+#endif
+}
+
+struct mempool *
+mp_new(unsigned chunk_size)
+{
+ chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
+ struct mempool_chunk *chunk = mp_new_chunk(chunk_size);
+ if (chunk == NULL) {
+ return NULL; /* Chunk allocation failed. */
+ }
+ struct mempool *pool = (struct mempool *)((uint8_t *)chunk - chunk_size);
+ ASAN_UNPOISON_MEMORY_REGION(pool, sizeof(*pool));
+ DBG("Creating mempool %p with %u bytes long chunks", pool, chunk_size);
+ chunk->next = NULL;
+ ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ *pool = (struct mempool) {
+ .state = { .free = { chunk_size - sizeof(*pool) }, .last = { chunk } },
+ .chunk_size = chunk_size,
+ .threshold = chunk_size >> 1,
+ .last_big = &pool->last_big
+ };
+ return pool;
+}
+
+static void
+mp_free_chain(struct mempool_chunk *chunk)
+{
+ while (chunk) {
+ ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ struct mempool_chunk *next = chunk->next;
+ mp_free_chunk(chunk);
+ chunk = next;
+ }
+}
+
+static void
+mp_free_big_chain(struct mempool_chunk *chunk)
+{
+ while (chunk) {
+ ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ struct mempool_chunk *next = chunk->next;
+ mp_free_big_chunk(chunk);
+ chunk = next;
+ }
+}
+
+void
+mp_delete(struct mempool *pool)
+{
+ if (pool == NULL) {
+ return;
+ }
+ DBG("Deleting mempool %p", pool);
+ mp_free_big_chain(pool->state.last[1]);
+ mp_free_chain(pool->unused);
+ mp_free_chain(pool->state.last[0]); // can contain the mempool structure
+}
+
+void
+mp_flush(struct mempool *pool)
+{
+ mp_free_big_chain(pool->state.last[1]);
+ struct mempool_chunk *chunk = pool->state.last[0], *next;
+ while (chunk) {
+ ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ if ((uint8_t *)chunk - chunk->size == (uint8_t *)pool) {
+ break;
+ }
+ next = chunk->next;
+ chunk->next = pool->unused;
+ ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ pool->unused = chunk;
+ chunk = next;
+ }
+ pool->state.last[0] = chunk;
+ if (chunk) {
+ pool->state.free[0] = chunk->size - sizeof(*pool);
+ ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ } else {
+ pool->state.free[0] = 0;
+ }
+ pool->state.last[1] = NULL;
+ pool->state.free[1] = 0;
+ pool->last_big = &pool->last_big;
+}
+
+static void
+mp_stats_chain(struct mempool_chunk *chunk, struct mempool_stats *stats, unsigned idx)
+{
+ struct mempool_chunk *next;
+ while (chunk) {
+ ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ stats->chain_size[idx] += chunk->size + sizeof(*chunk);
+ stats->chain_count[idx]++;
+ next = chunk->next;
+ ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ chunk = next;
+ }
+ stats->total_size += stats->chain_size[idx];
+}
+
+void
+mp_stats(struct mempool *pool, struct mempool_stats *stats)
+{
+ bzero(stats, sizeof(*stats));
+ mp_stats_chain(pool->state.last[0], stats, 0);
+ mp_stats_chain(pool->state.last[1], stats, 1);
+ mp_stats_chain(pool->unused, stats, 2);
+}
+
+uint64_t
+mp_total_size(struct mempool *pool)
+{
+ struct mempool_stats stats;
+ mp_stats(pool, &stats);
+ return stats.total_size;
+}
+
+static void *
+mp_alloc_internal(struct mempool *pool, unsigned size)
+{
+ struct mempool_chunk *chunk;
+ if (size <= pool->threshold) {
+ pool->idx = 0;
+ if (pool->unused) {
+ chunk = pool->unused;
+ ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ pool->unused = chunk->next;
+ } else {
+ chunk = mp_new_chunk(pool->chunk_size);
+ if (chunk == NULL) {
+ return NULL; /* Chunk allocation failed. */
+ }
+ }
+ chunk->next = pool->state.last[0];
+ ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ pool->state.last[0] = chunk;
+ pool->state.free[0] = pool->chunk_size - size;
+ return (uint8_t *)chunk - pool->chunk_size;
+ } else if (size <= MP_SIZE_MAX) {
+ pool->idx = 1;
+ unsigned aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
+ chunk = mp_new_big_chunk(aligned);
+ if (!chunk) {
+ return NULL;
+ }
+ chunk->next = pool->state.last[1];
+ ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
+ pool->state.last[1] = chunk;
+ pool->state.free[1] = aligned - size;
+ return pool->last_big = (uint8_t *)chunk - aligned;
+ } else {
+ fprintf(stderr, "Cannot allocate %u bytes from a mempool\n", size);
+ assert(0);
+ return NULL;
+ }
+}
+
+void *
+mp_alloc(struct mempool *pool, unsigned size)
+{
+ unsigned avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
+ void *ptr = NULL;
+ if (size <= avail) {
+ pool->state.free[0] = avail - size;
+ ptr = (uint8_t*)pool->state.last[0] - avail;
+ } else {
+ ptr = mp_alloc_internal(pool, size);
+ }
+ ASAN_UNPOISON_MEMORY_REGION(ptr, size);
+ return ptr;
+}
+
+void *
+mp_alloc_noalign(struct mempool *pool, unsigned size)
+{
+ void *ptr = NULL;
+ if (size <= pool->state.free[0]) {
+ ptr = (uint8_t*)pool->state.last[0] - pool->state.free[0];
+ pool->state.free[0] -= size;
+ } else {
+ ptr = mp_alloc_internal(pool, size);
+ }
+ ASAN_UNPOISON_MEMORY_REGION(ptr, size);
+ return ptr;
+}
+
+void *
+mp_alloc_zero(struct mempool *pool, unsigned size)
+{
+ void *ptr = mp_alloc(pool, size);
+ if (ptr != NULL) { /* mp_alloc() can return NULL on OOM. */
+ bzero(ptr, size);
+ }
+ return ptr;
+}
diff --git a/src/contrib/ucw/mempool.h b/src/contrib/ucw/mempool.h
new file mode 100644
index 0000000..c5a4fa8
--- /dev/null
+++ b/src/contrib/ucw/mempool.h
@@ -0,0 +1,124 @@
+/*
+ * UCW Library -- Memory Pools
+ *
+ * (c) 1997--2005 Martin Mares <mj@ucw.cz>
+ * (c) 2007 Pavel Charvat <pchar@ucw.cz>
+ * (c) 2015, 2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+ *
+ * This software may be freely distributed and used according to the terms
+ * of the GNU Lesser General Public License.
+ */
+
+#pragma once
+
+#include <string.h>
+#include <stdint.h>
+
+#define CPU_STRUCT_ALIGN (sizeof(void*))
+
+/***
+ * [[defs]]
+ * Definitions
+ * -----------
+ ***/
+
+/**
+ * Memory pool state.
+ * You should use this one as an opaque handle only, the insides are internal.
+ **/
+struct mempool_state {
+ unsigned free[2];
+ void *last[2];
+};
+
+/**
+ * Memory pool.
+ * You should use this one as an opaque handle only, the insides are internal.
+ **/
+struct mempool {
+ struct mempool_state state;
+ void *unused, *last_big;
+ unsigned chunk_size, threshold, idx;
+};
+
+struct mempool_stats { /** Mempool statistics. See mp_stats(). **/
+ uint64_t total_size; /** Real allocated size in bytes. */
+ unsigned chain_count[3]; /** Number of allocated chunks in small/big/unused chains. */
+ unsigned chain_size[3]; /** Size of allocated chunks in small/big/unused chains. */
+};
+
+/***
+ * [[basic]]
+ * Basic manipulation
+ * ------------------
+ ***/
+
+/**
+ * Initialize a given mempool structure.
+ * \p chunk_size must be in the interval `[1, UINT_MAX / 2]`.
+ * The pool will allocate memory in chunks of this size
+ * and satisfy allocation requests from them.
+ *
+ * Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
+ **/
+void mp_init(struct mempool *pool, unsigned chunk_size);
+
+/**
+ * Allocate and initialize a new memory pool.
+ * See \ref mp_init() for \p chunk_size limitations.
+ *
+ * The new mempool structure is allocated on the new mempool.
+ *
+ * Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
+ **/
+struct mempool *mp_new(unsigned chunk_size);
+
+/**
+ * Cleanup mempool initialized by mp_init or mp_new.
+ * Frees all the memory allocated by this mempool and,
+ * if created by \ref mp_new(), the \p pool itself.
+ **/
+void mp_delete(struct mempool *pool);
+
+/**
+ * Frees all data on a memory pool, but leaves it working.
+ * It can keep some of the chunks allocated to serve
+ * further allocation requests. Leaves the \p pool alive,
+ * even if it was created with \ref mp_new().
+ **/
+void mp_flush(struct mempool *pool);
+
+/**
+ * Compute some statistics for debug purposes.
+ * See the definition of the <<struct_mempool_stats,mempool_stats structure>>.
+ **/
+void mp_stats(struct mempool *pool, struct mempool_stats *stats);
+uint64_t mp_total_size(struct mempool *pool); /** How many bytes were allocated by the pool. **/
+
+/***
+ * [[alloc]]
+ * Allocation routines
+ * -------------------
+ ***/
+
+/**
+ * The function allocates \p size new bytes on a given memory pool.
+ * If the \p size is zero, the resulting pointer is undefined,
+ * but it may be safely reallocated or used as the parameter
+ * to other functions below.
+ *
+ * The resulting pointer is always aligned to a multiple of
+ * `CPU_STRUCT_ALIGN` bytes and this condition remains true also
+ * after future reallocations.
+ **/
+void *mp_alloc(struct mempool *pool, unsigned size);
+
+/**
+ * The same as \ref mp_alloc(), but the result may be unaligned.
+ **/
+void *mp_alloc_noalign(struct mempool *pool, unsigned size);
+
+/**
+ * The same as \ref mp_alloc(), but fills the newly allocated memory with zeroes.
+ **/
+void *mp_alloc_zero(struct mempool *pool, unsigned size);
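+
+/*
+ * Usage sketch (editorial example): the typical one-shot allocation pattern.
+ *
+ *   struct mempool *mp = mp_new(4096);
+ *   char *buf = mp_alloc(mp, 256);   // aligned; lives until flush/delete
+ *   ...
+ *   mp_flush(mp);                    // release all allocations, pool stays usable
+ *   mp_delete(mp);                   // destroy the pool itself
+ */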