path: root/lib/generic/queue.c

/*  Copyright (C) 2018 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
 *  SPDX-License-Identifier: GPL-3.0-or-later
 */

#include "lib/generic/queue.h"
#include <string.h>

void queue_init_impl(struct queue *q, size_t item_size)
{
	q->len = 0;
	q->item_size = item_size;
	q->head = q->tail = NULL;
	/* Aim for 128 B per chunk (two x86 cache lines), minus a small margin
	 * that the allocator can use for its own overhead.
	 * Normally (64-bit pointers) this means a 16 B header + 13*8 B of data. */
	q->chunk_cap = (128 - offsetof(struct queue_chunk, data)
			- sizeof(size_t)
			) / item_size;
	if (!q->chunk_cap) q->chunk_cap = 1; /* item_size big enough by itself */
}
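
/* Worked example of the capacity formula above (illustrative, assuming the
 * usual 64-bit layout from the comment: 16 B chunk header, 8 B size_t margin):
 *   item_size == 8   ->  chunk_cap == (128 - 16 - 8) / 8 == 13
 *   item_size == 4   ->  chunk_cap == (128 - 16 - 8) / 4 == 26
 *   item_size == 120 ->  chunk_cap == 104 / 120 == 0, clamped to 1 by the check above. */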

void queue_deinit_impl(struct queue *q)
{
	assert(q);
	struct queue_chunk *p = q->head;
	while (p != NULL) {
		struct queue_chunk *pf = p;
		p = p->next;
		free(pf);
	}
#ifndef NDEBUG
	memset(q, 0, sizeof(*q));
#endif
}

static struct queue_chunk * queue_chunk_new(const struct queue *q)
{
	struct queue_chunk *c = malloc(offsetof(struct queue_chunk, data)
					+ q->chunk_cap * q->item_size);
	if (unlikely(!c)) abort(); // simplify things: treat allocation failure as fatal
	memset(c, 0, offsetof(struct queue_chunk, data));
	c->cap = q->chunk_cap;
	/* ->begin and ->end are zero (from the memset above), i.e. by default
	 * we optimize for _push rather than _push_head. */
	return c;
}
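
/* Size note (illustrative): with the default 64-bit figures from queue_init_impl,
 * each chunk allocation is offsetof(struct queue_chunk, data) + 13 * 8 == 120 B,
 * i.e. just under the 128 B budget, so the allocator's own overhead still fits. */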

/* Return pointer to the space for the new element. */
void * queue_push_impl(struct queue *q)
{
	assert(q);
	struct queue_chunk *t = q->tail; // shorthand
	if (unlikely(!t)) {
		assert(!q->head && !q->len);
		q->head = q->tail = t = queue_chunk_new(q);
	} else
	if (t->end == t->cap) {
		if (t->begin * 2 >= t->cap) {
			/* Utilization is at most 50%, so we can shift left (no overlap). */
			memcpy(t->data, t->data + t->begin * q->item_size,
				(t->end - t->begin) * q->item_size);
			t->end -= t->begin;
			t->begin = 0;
		} else {
			/* Let's grow the tail by another chunk. */
			assert(!t->next);
			t->next = queue_chunk_new(q);
			t = q->tail = t->next;
		}
	}
	assert(t->end < t->cap);
	++(q->len);
	++(t->end);
	return t->data + q->item_size * (t->end - 1);
}
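
/* Illustrative usage sketch (not part of this file): callers are expected to go
 * through the type-safe wrapper macros declared in queue.h; the exact macro names
 * below (queue_t, queue_init, queue_push, queue_head, queue_pop, queue_deinit)
 * are assumptions based on the _impl functions defined here.
 *
 *	queue_t(int) q;
 *	queue_init(q);
 *	queue_push(q, 42);       // reserves a slot via queue_push_impl()
 *	int x = queue_head(q);   // x == 42
 *	queue_pop(q);
 *	queue_deinit(q);         // calls queue_deinit_impl()
 */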

/* Return pointer to the space for the new element. */
void * queue_push_head_impl(struct queue *q)
{
	/* When we have a choice, we optimize for further _push_head calls,
	 * i.e. when shifting or allocating a chunk,
	 * we store items at the tail end of the chunk. */
	assert(q);
	struct queue_chunk *h = q->head; // shorthand
	if (unlikely(!h)) {
		assert(!q->tail && !q->len);
		h = q->head = q->tail = queue_chunk_new(q);
		h->begin = h->end = h->cap;
	} else
	if (h->begin == 0) {
		if (h->end * 2 <= h->cap) {
			/* Utilization is at most 50%, so we can shift right (no overlap).
			 * The computations here are simplified because h->begin == 0. */
			const int cnt = h->end;
			memcpy(h->data + (h->cap - cnt) * q->item_size, h->data,
				cnt * q->item_size);
			h->begin = h->cap - cnt;
			h->end = h->cap;
		} else {
			/* Let's grow the head by another chunk. */
			h = queue_chunk_new(q);
			h->next = q->head;
			q->head = h;
			h->begin = h->end = h->cap;
		}
	}
	assert(h->begin > 0);
	--(h->begin);
	++(q->len);
	return h->data + q->item_size * h->begin;
}
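
/* Worked example of the head-side shift above (illustrative): with cap == 13,
 * begin == 0 and end == 5, the condition end * 2 <= cap holds, so the 5 items
 * are copied to indices 8..12 (begin becomes 13 - 5 == 8, end becomes 13),
 * leaving room for further _push_head without allocating another chunk. */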