author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 06:23:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 06:23:09 +0000
commit 30d479c28c831a0d4f1fdb54a9e346b0fc176be1 (patch)
tree aa35d7414ce9f1326abf6f723f6dfa5b0aa08b1d /tables
parent Initial commit. (diff)
Adding upstream version 1.7.2. (tag: upstream/1.7.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tables')
-rw-r--r--  tables/apr_hash.c      552
-rw-r--r--  tables/apr_skiplist.c  864
-rw-r--r--  tables/apr_tables.c   1300
3 files changed, 2716 insertions, 0 deletions
diff --git a/tables/apr_hash.c b/tables/apr_hash.c
new file mode 100644
index 0000000..0bf4d28
--- /dev/null
+++ b/tables/apr_hash.c
@@ -0,0 +1,552 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_private.h"
+
+#include "apr_general.h"
+#include "apr_pools.h"
+#include "apr_time.h"
+
+#include "apr_hash.h"
+
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+
+#if APR_POOL_DEBUG && APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+/*
+ * The internal form of a hash table.
+ *
+ * The table is an array indexed by the hash of the key; collisions
+ * are resolved by hanging a linked list of hash entries off each
+ * element of the array. Although this is a really simple design it
+ * isn't too bad given that pools have a low allocation overhead.
+ */
+
+typedef struct apr_hash_entry_t apr_hash_entry_t;
+
+struct apr_hash_entry_t {
+ apr_hash_entry_t *next;
+ unsigned int hash;
+ const void *key;
+ apr_ssize_t klen;
+ const void *val;
+};
+
+/*
+ * Data structure for iterating through a hash table.
+ *
+ * We keep a pointer to the next hash entry here to allow the current
+ * hash entry to be freed or otherwise mangled between calls to
+ * apr_hash_next().
+ */
+struct apr_hash_index_t {
+ apr_hash_t *ht;
+ apr_hash_entry_t *this, *next;
+ unsigned int index;
+};
+
+/*
+ * The size of the array is always a power of two. We use the maximum
+ * index rather than the size so that we can use bitwise-AND for
+ * modular arithmetic.
+ * The count of hash entries may be greater depending on the chosen
+ * collision rate.
+ */
+struct apr_hash_t {
+ apr_pool_t *pool;
+ apr_hash_entry_t **array;
+ apr_hash_index_t iterator; /* For apr_hash_first(NULL, ...) */
+ unsigned int count, max, seed;
+ apr_hashfunc_t hash_func;
+ apr_hash_entry_t *free; /* List of recycled entries */
+};
+
+#define INITIAL_MAX 15 /* tunable == 2^n - 1 */
+
+
+/*
+ * Hash creation functions.
+ */
+
+static apr_hash_entry_t **alloc_array(apr_hash_t *ht, unsigned int max)
+{
+ return apr_pcalloc(ht->pool, sizeof(*ht->array) * (max + 1));
+}
+
+APR_DECLARE(apr_hash_t *) apr_hash_make(apr_pool_t *pool)
+{
+ apr_hash_t *ht;
+ apr_time_t now = apr_time_now();
+
+ ht = apr_palloc(pool, sizeof(apr_hash_t));
+ ht->pool = pool;
+ ht->free = NULL;
+ ht->count = 0;
+ ht->max = INITIAL_MAX;
+ ht->seed = (unsigned int)((now >> 32) ^ now ^ (apr_uintptr_t)pool ^
+ (apr_uintptr_t)ht ^ (apr_uintptr_t)&now) - 1;
+ ht->array = alloc_array(ht, ht->max);
+ ht->hash_func = NULL;
+
+ return ht;
+}
+
+APR_DECLARE(apr_hash_t *) apr_hash_make_custom(apr_pool_t *pool,
+ apr_hashfunc_t hash_func)
+{
+ apr_hash_t *ht = apr_hash_make(pool);
+ ht->hash_func = hash_func;
+ return ht;
+}
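+
+/* A minimal usage sketch (illustrative only, assuming an already-created
+ * pool `p`): create a hash, store a value, and look it up again. Note
+ * that keys and values are not copied into the table.
+ *
+ *     apr_hash_t *h = apr_hash_make(p);
+ *     apr_hash_set(h, "key", APR_HASH_KEY_STRING, "value");
+ *     const char *v = apr_hash_get(h, "key", APR_HASH_KEY_STRING);
+ *     // v == "value"
+ */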
+
+
+/*
+ * Hash iteration functions.
+ */
+
+APR_DECLARE(apr_hash_index_t *) apr_hash_next(apr_hash_index_t *hi)
+{
+ hi->this = hi->next;
+ while (!hi->this) {
+ if (hi->index > hi->ht->max)
+ return NULL;
+
+ hi->this = hi->ht->array[hi->index++];
+ }
+ hi->next = hi->this->next;
+ return hi;
+}
+
+APR_DECLARE(apr_hash_index_t *) apr_hash_first(apr_pool_t *p, apr_hash_t *ht)
+{
+ apr_hash_index_t *hi;
+ if (p)
+ hi = apr_palloc(p, sizeof(*hi));
+ else
+ hi = &ht->iterator;
+
+ hi->ht = ht;
+ hi->index = 0;
+ hi->this = NULL;
+ hi->next = NULL;
+ return apr_hash_next(hi);
+}
+
+APR_DECLARE(void) apr_hash_this(apr_hash_index_t *hi,
+ const void **key,
+ apr_ssize_t *klen,
+ void **val)
+{
+ if (key) *key = hi->this->key;
+ if (klen) *klen = hi->this->klen;
+ if (val) *val = (void *)hi->this->val;
+}
+
+APR_DECLARE(const void *) apr_hash_this_key(apr_hash_index_t *hi)
+{
+ const void *key;
+
+ apr_hash_this(hi, &key, NULL, NULL);
+ return key;
+}
+
+APR_DECLARE(apr_ssize_t) apr_hash_this_key_len(apr_hash_index_t *hi)
+{
+ apr_ssize_t klen;
+
+ apr_hash_this(hi, NULL, &klen, NULL);
+ return klen;
+}
+
+APR_DECLARE(void *) apr_hash_this_val(apr_hash_index_t *hi)
+{
+ void *val;
+
+ apr_hash_this(hi, NULL, NULL, &val);
+ return val;
+}
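+
+/* Iteration sketch (illustrative, assuming a populated hash `h` and a
+ * pool `p`). The saved `next` pointer described above is what makes it
+ * safe to delete the current entry mid-walk:
+ *
+ *     apr_hash_index_t *hi;
+ *     const void *k;
+ *     apr_ssize_t klen;
+ *     void *v;
+ *     for (hi = apr_hash_first(p, h); hi; hi = apr_hash_next(hi)) {
+ *         apr_hash_this(hi, &k, &klen, &v);
+ *         // ... use k, klen and v ...
+ *     }
+ */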
+
+/*
+ * Expanding a hash table
+ */
+
+static void expand_array(apr_hash_t *ht)
+{
+ apr_hash_index_t *hi;
+ apr_hash_entry_t **new_array;
+ unsigned int new_max;
+
+ new_max = ht->max * 2 + 1;
+ new_array = alloc_array(ht, new_max);
+ for (hi = apr_hash_first(NULL, ht); hi; hi = apr_hash_next(hi)) {
+ unsigned int i = hi->this->hash & new_max;
+ hi->this->next = new_array[i];
+ new_array[i] = hi->this;
+ }
+ ht->array = new_array;
+ ht->max = new_max;
+}
+
+static unsigned int hashfunc_default(const char *char_key, apr_ssize_t *klen,
+ unsigned int hash)
+{
+ const unsigned char *key = (const unsigned char *)char_key;
+ const unsigned char *p;
+ apr_ssize_t i;
+
+ /*
+ * This is the popular `times 33' hash algorithm which is used by
+ * perl and also appears in Berkeley DB. This is one of the best
+ * known hash functions for strings because it is both computed
+ * very fast and distributes very well.
+ *
+ * The originator may be Dan Bernstein but the code in Berkeley DB
+ * cites Chris Torek as the source. The best citation I have found
+ * is "Chris Torek, Hash function for text in C, Usenet message
+ * <27038@mimsy.umd.edu> in comp.lang.c , October, 1990." in Rich
+ * Salz's USENIX 1992 paper about INN which can be found at
+ * <http://citeseer.nj.nec.com/salz92internetnews.html>.
+ *
+ * The magic of number 33, i.e. why it works better than many other
+ * constants, prime or not, has never been adequately explained by
+ * anyone. So I try an explanation: if one experimentally tests all
+ * multipliers between 1 and 256 (as I did while writing a low-level
+ * data structure library some time ago) one detects that even
+ * numbers are not useable at all. The remaining 128 odd numbers
+ * (except for the number 1) work more or less all equally well.
+ * They all distribute in an acceptable way and this way fill a hash
+ * table with an average percent of approx. 86%.
+ *
+ * If one compares the chi^2 values of the variants (see
+ * Bob Jenkins ``Hashing Frequently Asked Questions'' at
+ * http://burtleburtle.net/bob/hash/hashfaq.html for a description
+ * of chi^2), the number 33 not even has the best value. But the
+ * number 33 and a few other equally good numbers like 17, 31, 63,
+ * 127 and 129 have nevertheless a great advantage to the remaining
+ * numbers in the large set of possible multipliers: their multiply
+ * operation can be replaced by a faster operation based on just one
+ * shift plus either a single addition or subtraction operation. And
+ * because a hash function has to both distribute good _and_ has to
+ * be very fast to compute, those few numbers should be preferred.
+ *
+ * -- Ralf S. Engelschall <rse@engelschall.com>
+ */
+
+ if (*klen == APR_HASH_KEY_STRING) {
+ for (p = key; *p; p++) {
+ hash = hash * 33 + *p;
+ }
+ *klen = p - key;
+ }
+ else {
+ for (p = key, i = *klen; i; i--, p++) {
+ hash = hash * 33 + *p;
+ }
+ }
+
+ return hash;
+}
+
+APR_DECLARE_NONSTD(unsigned int) apr_hashfunc_default(const char *char_key,
+ apr_ssize_t *klen)
+{
+ return hashfunc_default(char_key, klen, 0);
+}
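+
+/* Sketch of supplying a custom hash function through
+ * apr_hash_make_custom() (illustrative; `my_hash` is hypothetical).
+ * A custom function must resolve APR_HASH_KEY_STRING itself, i.e.
+ * write the real key length back through klen:
+ *
+ *     static unsigned int my_hash(const char *key, apr_ssize_t *klen)
+ *     {
+ *         return apr_hashfunc_default(key, klen);  // delegate for brevity
+ *     }
+ *
+ *     apr_hash_t *h = apr_hash_make_custom(p, my_hash);
+ */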
+
+/*
+ * This is where we keep the details of the hash function and control
+ * the maximum collision rate.
+ *
+ * If val is non-NULL it creates and initializes a new hash entry if
+ * there isn't already one there; it returns an updatable pointer so
+ * that hash entries can be removed.
+ */
+
+static apr_hash_entry_t **find_entry(apr_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen,
+ const void *val)
+{
+ apr_hash_entry_t **hep, *he;
+ unsigned int hash;
+
+ if (ht->hash_func)
+ hash = ht->hash_func(key, &klen);
+ else
+ hash = hashfunc_default(key, &klen, ht->seed);
+
+ /* scan linked list */
+ for (hep = &ht->array[hash & ht->max], he = *hep;
+ he; hep = &he->next, he = *hep) {
+ if (he->hash == hash
+ && he->klen == klen
+ && memcmp(he->key, key, klen) == 0)
+ break;
+ }
+ if (he || !val)
+ return hep;
+
+ /* add a new entry for non-NULL values */
+ if ((he = ht->free) != NULL)
+ ht->free = he->next;
+ else
+ he = apr_palloc(ht->pool, sizeof(*he));
+ he->next = NULL;
+ he->hash = hash;
+ he->key = key;
+ he->klen = klen;
+ he->val = val;
+ *hep = he;
+ ht->count++;
+ return hep;
+}
+
+APR_DECLARE(apr_hash_t *) apr_hash_copy(apr_pool_t *pool,
+ const apr_hash_t *orig)
+{
+ apr_hash_t *ht;
+ apr_hash_entry_t *new_vals;
+ unsigned int i, j;
+
+ ht = apr_palloc(pool, sizeof(apr_hash_t) +
+ sizeof(*ht->array) * (orig->max + 1) +
+ sizeof(apr_hash_entry_t) * orig->count);
+ ht->pool = pool;
+ ht->free = NULL;
+ ht->count = orig->count;
+ ht->max = orig->max;
+ ht->seed = orig->seed;
+ ht->hash_func = orig->hash_func;
+ ht->array = (apr_hash_entry_t **)((char *)ht + sizeof(apr_hash_t));
+
+ new_vals = (apr_hash_entry_t *)((char *)(ht) + sizeof(apr_hash_t) +
+ sizeof(*ht->array) * (orig->max + 1));
+ j = 0;
+ for (i = 0; i <= ht->max; i++) {
+ apr_hash_entry_t **new_entry = &(ht->array[i]);
+ apr_hash_entry_t *orig_entry = orig->array[i];
+ while (orig_entry) {
+ *new_entry = &new_vals[j++];
+ (*new_entry)->hash = orig_entry->hash;
+ (*new_entry)->key = orig_entry->key;
+ (*new_entry)->klen = orig_entry->klen;
+ (*new_entry)->val = orig_entry->val;
+ new_entry = &((*new_entry)->next);
+ orig_entry = orig_entry->next;
+ }
+ *new_entry = NULL;
+ }
+ return ht;
+}
+
+APR_DECLARE(void *) apr_hash_get(apr_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen)
+{
+ apr_hash_entry_t *he;
+ he = *find_entry(ht, key, klen, NULL);
+ if (he)
+ return (void *)he->val;
+ else
+ return NULL;
+}
+
+APR_DECLARE(void) apr_hash_set(apr_hash_t *ht,
+ const void *key,
+ apr_ssize_t klen,
+ const void *val)
+{
+ apr_hash_entry_t **hep;
+ hep = find_entry(ht, key, klen, val);
+ if (*hep) {
+ if (!val) {
+ /* delete entry */
+ apr_hash_entry_t *old = *hep;
+ *hep = (*hep)->next;
+ old->next = ht->free;
+ ht->free = old;
+ --ht->count;
+ }
+ else {
+ /* replace entry */
+ (*hep)->val = val;
+ /* check that the collision rate isn't too high */
+ if (ht->count > ht->max) {
+ expand_array(ht);
+ }
+ }
+ }
+ /* else key not present and val==NULL */
+}
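+
+/* Semantics sketch (illustrative): a non-NULL val inserts or replaces,
+ * a NULL val deletes, and deleted entries are recycled via ht->free:
+ *
+ *     apr_hash_set(h, "k", APR_HASH_KEY_STRING, "v1");  // insert
+ *     apr_hash_set(h, "k", APR_HASH_KEY_STRING, "v2");  // replace
+ *     apr_hash_set(h, "k", APR_HASH_KEY_STRING, NULL);  // delete
+ */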
+
+APR_DECLARE(unsigned int) apr_hash_count(apr_hash_t *ht)
+{
+ return ht->count;
+}
+
+APR_DECLARE(void) apr_hash_clear(apr_hash_t *ht)
+{
+ apr_hash_index_t *hi;
+ for (hi = apr_hash_first(NULL, ht); hi; hi = apr_hash_next(hi))
+ apr_hash_set(ht, hi->this->key, hi->this->klen, NULL);
+}
+
+APR_DECLARE(apr_hash_t*) apr_hash_overlay(apr_pool_t *p,
+ const apr_hash_t *overlay,
+ const apr_hash_t *base)
+{
+ return apr_hash_merge(p, overlay, base, NULL, NULL);
+}
+
+APR_DECLARE(apr_hash_t *) apr_hash_merge(apr_pool_t *p,
+ const apr_hash_t *overlay,
+ const apr_hash_t *base,
+ void * (*merger)(apr_pool_t *p,
+ const void *key,
+ apr_ssize_t klen,
+ const void *h1_val,
+ const void *h2_val,
+ const void *data),
+ const void *data)
+{
+ apr_hash_t *res;
+ apr_hash_entry_t *new_vals = NULL;
+ apr_hash_entry_t *iter;
+ apr_hash_entry_t *ent;
+ unsigned int i, j, k, hash;
+
+#if APR_POOL_DEBUG
+ /* we don't copy keys and values, so it's necessary that
+ * overlay->a.pool and base->a.pool have a life span at least
+ * as long as p
+ */
+ if (!apr_pool_is_ancestor(overlay->pool, p)) {
+ fprintf(stderr,
+ "apr_hash_merge: overlay's pool is not an ancestor of p\n");
+ abort();
+ }
+ if (!apr_pool_is_ancestor(base->pool, p)) {
+ fprintf(stderr,
+ "apr_hash_merge: base's pool is not an ancestor of p\n");
+ abort();
+ }
+#endif
+
+ res = apr_palloc(p, sizeof(apr_hash_t));
+ res->pool = p;
+ res->free = NULL;
+ res->hash_func = base->hash_func;
+ res->count = base->count;
+ res->max = (overlay->max > base->max) ? overlay->max : base->max;
+ if (base->count + overlay->count > res->max) {
+ res->max = res->max * 2 + 1;
+ }
+ res->seed = base->seed;
+ res->array = alloc_array(res, res->max);
+ if (base->count + overlay->count) {
+ new_vals = apr_palloc(p, sizeof(apr_hash_entry_t) *
+ (base->count + overlay->count));
+ }
+ j = 0;
+ for (k = 0; k <= base->max; k++) {
+ for (iter = base->array[k]; iter; iter = iter->next) {
+ i = iter->hash & res->max;
+ new_vals[j].klen = iter->klen;
+ new_vals[j].key = iter->key;
+ new_vals[j].val = iter->val;
+ new_vals[j].hash = iter->hash;
+ new_vals[j].next = res->array[i];
+ res->array[i] = &new_vals[j];
+ j++;
+ }
+ }
+
+ for (k = 0; k <= overlay->max; k++) {
+ for (iter = overlay->array[k]; iter; iter = iter->next) {
+ if (res->hash_func)
+ hash = res->hash_func(iter->key, &iter->klen);
+ else
+ hash = hashfunc_default(iter->key, &iter->klen, res->seed);
+ i = hash & res->max;
+ for (ent = res->array[i]; ent; ent = ent->next) {
+ if ((ent->klen == iter->klen) &&
+ (memcmp(ent->key, iter->key, iter->klen) == 0)) {
+ if (merger) {
+ ent->val = (*merger)(p, iter->key, iter->klen,
+ iter->val, ent->val, data);
+ }
+ else {
+ ent->val = iter->val;
+ }
+ break;
+ }
+ }
+ if (!ent) {
+ new_vals[j].klen = iter->klen;
+ new_vals[j].key = iter->key;
+ new_vals[j].val = iter->val;
+ new_vals[j].hash = hash;
+ new_vals[j].next = res->array[i];
+ res->array[i] = &new_vals[j];
+ res->count++;
+ j++;
+ }
+ }
+ }
+ return res;
+}
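+
+/* Merge sketch (illustrative; `combine` is a hypothetical merger that
+ * joins conflicting string values). Without a merger, as in
+ * apr_hash_overlay(), the overlay's value simply wins:
+ *
+ *     static void *combine(apr_pool_t *p, const void *key,
+ *                          apr_ssize_t klen, const void *overlay_val,
+ *                          const void *base_val, const void *data)
+ *     {
+ *         return apr_pstrcat(p, (const char *)base_val, ", ",
+ *                            (const char *)overlay_val, NULL);
+ *     }
+ *
+ *     apr_hash_t *m = apr_hash_merge(p, overlay, base, combine, NULL);
+ */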
+
+/* This is basically the following...
+ * for every element in hash table {
+ * comp(element.key, element.value)
+ * }
+ *
+ * Like with apr_table_do, the comp callback is called for each and every
+ * element of the hash table.
+ */
+APR_DECLARE(int) apr_hash_do(apr_hash_do_callback_fn_t *comp,
+ void *rec, const apr_hash_t *ht)
+{
+ apr_hash_index_t hix;
+ apr_hash_index_t *hi;
+ int rv, dorv = 1;
+
+ hix.ht = (apr_hash_t *)ht;
+ hix.index = 0;
+ hix.this = NULL;
+ hix.next = NULL;
+
+ if ((hi = apr_hash_next(&hix))) {
+ /* Scan the entire table */
+ do {
+ rv = (*comp)(rec, hi->this->key, hi->this->klen, hi->this->val);
+ } while (rv && (hi = apr_hash_next(hi)));
+
+ if (rv == 0) {
+ dorv = 0;
+ }
+ }
+ return dorv;
+}
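+
+/* Callback sketch for apr_hash_do() (illustrative; `count_entries` is
+ * hypothetical). Returning 0 from the callback stops the scan and makes
+ * apr_hash_do() return 0 as well:
+ *
+ *     static int count_entries(void *rec, const void *key,
+ *                              apr_ssize_t klen, const void *value)
+ *     {
+ *         ++*(unsigned int *)rec;
+ *         return 1;  // keep scanning
+ *     }
+ *
+ *     unsigned int n = 0;
+ *     apr_hash_do(count_entries, &n, h);  // n == apr_hash_count(h)
+ */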
+
+APR_POOL_IMPLEMENT_ACCESSOR(hash)
diff --git a/tables/apr_skiplist.c b/tables/apr_skiplist.c
new file mode 100644
index 0000000..8013ed7
--- /dev/null
+++ b/tables/apr_skiplist.c
@@ -0,0 +1,864 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Modified to use APR and APR pools.
+ * TODO: Is malloc() better? Will long running skiplists grow too much?
+ * Keep the skiplist_alloc() and skiplist_free() until we know.
+ * Yeah, if using pools it means some bogus cycles for checks
+ * (and a useless function call for skiplist_free) which we
+ * could remove if/when needed.
+ */
+
+#include "apr_skiplist.h"
+
+typedef struct {
+ apr_skiplistnode **data;
+ size_t size, pos;
+ apr_pool_t *p;
+} apr_skiplist_q;
+
+struct apr_skiplist {
+ apr_skiplist_compare compare;
+ apr_skiplist_compare comparek;
+ int height;
+ int preheight;
+ size_t size;
+ apr_skiplistnode *top;
+ apr_skiplistnode *bottom;
+ /* These two are needed for appending */
+ apr_skiplistnode *topend;
+ apr_skiplistnode *bottomend;
+ apr_skiplist *index;
+ apr_array_header_t *memlist;
+ apr_skiplist_q nodes_q,
+ stack_q;
+ apr_pool_t *pool;
+};
+
+struct apr_skiplistnode {
+ void *data;
+ apr_skiplistnode *next;
+ apr_skiplistnode *prev;
+ apr_skiplistnode *down;
+ apr_skiplistnode *up;
+ apr_skiplistnode *previndex;
+ apr_skiplistnode *nextindex;
+ apr_skiplist *sl;
+};
+
+static unsigned int get_b_rand(void)
+{
+ static unsigned int ph = 32; /* More bits than we will ever use */
+ static unsigned int randseq;
+ if (ph > 31) { /* Num bits in return of rand() */
+ ph = 0;
+ randseq = rand();
+ }
+ return randseq & (1U << ph++);
+}
+
+typedef struct {
+ size_t size;
+ apr_array_header_t *list;
+} memlist_t;
+
+typedef struct {
+ void *ptr;
+ char inuse;
+} chunk_t;
+
+APR_DECLARE(void *) apr_skiplist_alloc(apr_skiplist *sl, size_t size)
+{
+ if (sl->pool) {
+ void *ptr;
+ int found_size = 0;
+ int i;
+ chunk_t *newchunk;
+ memlist_t *memlist = (memlist_t *)sl->memlist->elts;
+ for (i = 0; i < sl->memlist->nelts; i++) {
+ if (memlist->size == size) {
+ int j;
+ chunk_t *chunk = (chunk_t *)memlist->list->elts;
+ found_size = 1;
+ for (j = 0; j < memlist->list->nelts; j++) {
+ if (!chunk->inuse) {
+ chunk->inuse = 1;
+ return chunk->ptr;
+ }
+ chunk++;
+ }
+ break; /* no free of this size; punt */
+ }
+ memlist++;
+ }
+ /* no free chunks */
+ ptr = apr_palloc(sl->pool, size);
+ if (!ptr) {
+ return ptr;
+ }
+ /*
+ * Is this a new chunk size? If so, we need to create a new
+ * array of chunks. Otherwise, re-use what we already have.
+ */
+ if (!found_size) {
+ memlist = apr_array_push(sl->memlist);
+ memlist->size = size;
+ memlist->list = apr_array_make(sl->pool, 20, sizeof(chunk_t));
+ }
+ newchunk = apr_array_push(memlist->list);
+ newchunk->ptr = ptr;
+ newchunk->inuse = 1;
+ return ptr;
+ }
+ else {
+ return malloc(size);
+ }
+}
+
+APR_DECLARE(void) apr_skiplist_free(apr_skiplist *sl, void *mem)
+{
+ if (!sl->pool) {
+ free(mem);
+ }
+ else {
+ int i;
+ memlist_t *memlist = (memlist_t *)sl->memlist->elts;
+ for (i = 0; i < sl->memlist->nelts; i++) {
+ int j;
+ chunk_t *chunk = (chunk_t *)memlist->list->elts;
+ for (j = 0; j < memlist->list->nelts; j++) {
+ if (chunk->ptr == mem) {
+ chunk->inuse = 0;
+ return;
+ }
+ chunk++;
+ }
+ memlist++;
+ }
+ }
+}
+
+static apr_status_t skiplist_qpush(apr_skiplist_q *q, apr_skiplistnode *m)
+{
+ if (q->pos >= q->size) {
+ apr_skiplistnode **data;
+ size_t size = (q->pos) ? q->pos * 2 : 32;
+ if (q->p) {
+ data = apr_palloc(q->p, size * sizeof(*data));
+ if (data && q->data) {
+ memcpy(data, q->data, q->pos * sizeof(*data));
+ }
+ }
+ else {
+ data = realloc(q->data, size * sizeof(*data));
+ }
+ if (!data) {
+ return APR_ENOMEM;
+ }
+ q->data = data;
+ q->size = size;
+ }
+ q->data[q->pos++] = m;
+ return APR_SUCCESS;
+}
+
+static APR_INLINE apr_skiplistnode *skiplist_qpop(apr_skiplist_q *q)
+{
+ return (q->pos > 0) ? q->data[--q->pos] : NULL;
+}
+
+static APR_INLINE void skiplist_qclear(apr_skiplist_q *q)
+{
+ q->pos = 0;
+}
+
+static apr_skiplistnode *skiplist_new_node(apr_skiplist *sl)
+{
+ apr_skiplistnode *m = skiplist_qpop(&sl->nodes_q);
+ if (!m) {
+ if (sl->pool) {
+ m = apr_palloc(sl->pool, sizeof *m);
+ }
+ else {
+ m = malloc(sizeof *m);
+ }
+ }
+ return m;
+}
+
+static apr_status_t skiplist_put_node(apr_skiplist *sl, apr_skiplistnode *m)
+{
+ return skiplist_qpush(&sl->nodes_q, m);
+}
+
+static apr_status_t skiplisti_init(apr_skiplist **s, apr_pool_t *p)
+{
+ apr_skiplist *sl;
+ if (p) {
+ sl = apr_pcalloc(p, sizeof(apr_skiplist));
+ sl->memlist = apr_array_make(p, 20, sizeof(memlist_t));
+ sl->pool = sl->nodes_q.p = sl->stack_q.p = p;
+ }
+ else {
+ sl = calloc(1, sizeof(apr_skiplist));
+ if (!sl) {
+ return APR_ENOMEM;
+ }
+ }
+ *s = sl;
+ return APR_SUCCESS;
+}
+
+static int indexing_comp(void *a, void *b)
+{
+ void *ac = (void *) (((apr_skiplist *) a)->compare);
+ void *bc = (void *) (((apr_skiplist *) b)->compare);
+ return ((ac < bc) ? -1 : ((ac > bc) ? 1 : 0));
+}
+
+static int indexing_compk(void *ac, void *b)
+{
+ void *bc = (void *) (((apr_skiplist *) b)->compare);
+ return ((ac < bc) ? -1 : ((ac > bc) ? 1 : 0));
+}
+
+APR_DECLARE(apr_status_t) apr_skiplist_init(apr_skiplist **s, apr_pool_t *p)
+{
+ apr_status_t rv;
+ apr_skiplist *sl;
+ rv = skiplisti_init(&sl, p);
+ if (rv != APR_SUCCESS) {
+ *s = NULL;
+ return rv;
+ }
+ rv = skiplisti_init(&sl->index, p);
+ if (rv != APR_SUCCESS) {
+ *s = NULL;
+ return rv;
+ }
+ apr_skiplist_set_compare(sl->index, indexing_comp, indexing_compk);
+ *s = sl;
+ return APR_SUCCESS;
+}
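+
+/* Usage sketch (illustrative; `intcmp` is a hypothetical comparator).
+ * A NULL pool makes the skiplist fall back to malloc()/free():
+ *
+ *     static int intcmp(void *a, void *b)
+ *     {
+ *         return (*(int *)a < *(int *)b) ? -1 : (*(int *)a > *(int *)b);
+ *     }
+ *
+ *     apr_skiplist *sl;
+ *     apr_skiplist_init(&sl, p);
+ *     apr_skiplist_set_compare(sl, intcmp, intcmp);
+ *     apr_skiplist_insert(sl, &value);
+ *     int *lowest = apr_skiplist_peek(sl);  // smallest element
+ */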
+
+APR_DECLARE(void) apr_skiplist_set_compare(apr_skiplist *sl,
+ apr_skiplist_compare comp,
+ apr_skiplist_compare compk)
+{
+ if (sl->compare && sl->comparek) {
+ apr_skiplist_add_index(sl, comp, compk);
+ }
+ else {
+ sl->compare = comp;
+ sl->comparek = compk;
+ }
+}
+
+APR_DECLARE(void) apr_skiplist_add_index(apr_skiplist *sl,
+ apr_skiplist_compare comp,
+ apr_skiplist_compare compk)
+{
+ apr_skiplistnode *m;
+ apr_skiplist *ni;
+ int icount = 0;
+ apr_skiplist_find(sl->index, (void *)comp, &m);
+ if (m) {
+ return; /* Index already there! */
+ }
+ if (skiplisti_init(&ni, sl->pool) != APR_SUCCESS) {
+ abort();
+ return;
+ }
+ apr_skiplist_set_compare(ni, comp, compk);
+ /* Build the new index... This can be expensive! */
+ m = apr_skiplist_insert(sl->index, ni);
+ while (m->prev) {
+ m = m->prev;
+ icount++;
+ }
+ for (m = apr_skiplist_getlist(sl); m; apr_skiplist_next(sl, &m)) {
+ int j = icount - 1;
+ apr_skiplistnode *nsln;
+ nsln = apr_skiplist_insert(ni, m->data);
+ /* skip from main index down list */
+ while (j > 0) {
+ m = m->nextindex;
+ j--;
+ }
+ /* insert this node in the indexlist after m */
+ nsln->nextindex = m->nextindex;
+ if (m->nextindex) {
+ m->nextindex->previndex = nsln;
+ }
+ nsln->previndex = m;
+ m->nextindex = nsln;
+ }
+}
+
+static int skiplisti_find_compare(apr_skiplist *sl, void *data,
+ apr_skiplistnode **ret,
+ apr_skiplist_compare comp,
+ int last)
+{
+ int count = 0;
+ apr_skiplistnode *m, *found = NULL;
+ for (m = sl->top; m; count++) {
+ if (m->next) {
+ int compared = comp(data, m->next->data);
+ if (compared == 0) {
+ found = m = m->next;
+ if (!last) {
+ break;
+ }
+ continue;
+ }
+ if (compared > 0) {
+ m = m->next;
+ continue;
+ }
+ }
+ m = m->down;
+ }
+ if (found) {
+ while (found->down) {
+ found = found->down;
+ }
+ *ret = found;
+ }
+ else {
+ *ret = NULL;
+ }
+ return count;
+}
+
+static void *find_compare(apr_skiplist *sli, void *data,
+ apr_skiplistnode **iter,
+ apr_skiplist_compare comp,
+ int last)
+{
+ apr_skiplistnode *m;
+ apr_skiplist *sl;
+ if (!comp) {
+ if (iter) {
+ *iter = NULL;
+ }
+ return NULL;
+ }
+ if (comp == sli->compare || !sli->index) {
+ sl = sli;
+ }
+ else {
+ apr_skiplist_find(sli->index, (void *)comp, &m);
+ if (!m) {
+ if (iter) {
+ *iter = NULL;
+ }
+ return NULL;
+ }
+ sl = (apr_skiplist *) m->data;
+ }
+ skiplisti_find_compare(sl, data, &m, sl->comparek, last);
+ if (iter) {
+ *iter = m;
+ }
+ return (m) ? m->data : NULL;
+}
+
+APR_DECLARE(void *) apr_skiplist_find_compare(apr_skiplist *sl, void *data,
+ apr_skiplistnode **iter,
+ apr_skiplist_compare comp)
+{
+ return find_compare(sl, data, iter, comp, 0);
+}
+
+APR_DECLARE(void *) apr_skiplist_find(apr_skiplist *sl, void *data, apr_skiplistnode **iter)
+{
+ return find_compare(sl, data, iter, sl->compare, 0);
+}
+
+APR_DECLARE(void *) apr_skiplist_last_compare(apr_skiplist *sl, void *data,
+ apr_skiplistnode **iter,
+ apr_skiplist_compare comp)
+{
+ return find_compare(sl, data, iter, comp, 1);
+}
+
+APR_DECLARE(void *) apr_skiplist_last(apr_skiplist *sl, void *data,
+ apr_skiplistnode **iter)
+{
+ return find_compare(sl, data, iter, sl->compare, 1);
+}
+
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_getlist(apr_skiplist *sl)
+{
+ if (!sl->bottom) {
+ return NULL;
+ }
+ return sl->bottom->next;
+}
+
+APR_DECLARE(void *) apr_skiplist_next(apr_skiplist *sl, apr_skiplistnode **iter)
+{
+ if (!*iter) {
+ return NULL;
+ }
+ *iter = (*iter)->next;
+ return (*iter) ? ((*iter)->data) : NULL;
+}
+
+APR_DECLARE(void *) apr_skiplist_previous(apr_skiplist *sl, apr_skiplistnode **iter)
+{
+ if (!*iter) {
+ return NULL;
+ }
+ *iter = (*iter)->prev;
+ return (*iter) ? ((*iter)->data) : NULL;
+}
+
+APR_DECLARE(void *) apr_skiplist_element(apr_skiplistnode *iter)
+{
+ return (iter) ? iter->data : NULL;
+}
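+
+/* Iteration sketch (illustrative): walk the bottom row in comparator
+ * order via getlist/next/element:
+ *
+ *     apr_skiplistnode *n;
+ *     for (n = apr_skiplist_getlist(sl); n; apr_skiplist_next(sl, &n)) {
+ *         void *d = apr_skiplist_element(n);
+ *         // ... use d ...
+ *     }
+ */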
+
+/* forward declared */
+static int skiplisti_remove(apr_skiplist *sl, apr_skiplistnode *m,
+ apr_skiplist_freefunc myfree);
+
+static APR_INLINE int skiplist_height(const apr_skiplist *sl)
+{
+ /* Skiplists (even empty) always have a top node, although this
+ * implementation defers its creation until the first insert, or
+ * deletes it with the last remove. We want the real height here.
+ */
+ return sl->height ? sl->height : 1;
+}
+
+static apr_skiplistnode *insert_compare(apr_skiplist *sl, void *data,
+ apr_skiplist_compare comp, int add,
+ apr_skiplist_freefunc myfree)
+{
+ apr_skiplistnode *m, *p, *tmp, *ret = NULL;
+ int ch, top_nh, nh = 1;
+
+ ch = skiplist_height(sl);
+ if (sl->preheight) {
+ while (nh < sl->preheight && get_b_rand()) {
+ nh++;
+ }
+ }
+ else {
+ while (nh <= ch && get_b_rand()) {
+ nh++;
+ }
+ }
+ top_nh = nh;
+
+ /* Now we have in nh the height at which we wish to insert our new node,
+ * and in ch the current height: don't create skip paths to the inserted
+ * element until the walk down through the tree (which decrements ch)
+ * reaches nh. From there, any walk down pushes the current node on a
+ * stack (the node(s) after which we would insert) to pop back through
+ * for insertion later.
+ */
+ m = sl->top;
+ while (m) {
+ /*
+ * To maintain stability, dups (compared == 0) must be added
+ * AFTER each other.
+ */
+ if (m->next) {
+ int compared = comp(data, m->next->data);
+ if (compared == 0) {
+ if (!add) {
+ /* Keep the existing element(s) */
+ skiplist_qclear(&sl->stack_q);
+ return NULL;
+ }
+ if (add < 0) {
+ /* Remove this element and continue with the next node
+ * or the new top if the current one is also removed.
+ */
+ apr_skiplistnode *top = sl->top;
+ skiplisti_remove(sl, m->next, myfree);
+ if (top != sl->top) {
+ m = sl->top;
+ skiplist_qclear(&sl->stack_q);
+ ch = skiplist_height(sl);
+ nh = top_nh;
+ }
+ continue;
+ }
+ }
+ if (compared >= 0) {
+ m = m->next;
+ continue;
+ }
+ }
+ if (ch <= nh) {
+ /* push on stack */
+ skiplist_qpush(&sl->stack_q, m);
+ }
+ m = m->down;
+ ch--;
+ }
+ /* Pop the stack and insert nodes */
+ p = NULL;
+ while ((m = skiplist_qpop(&sl->stack_q))) {
+ tmp = skiplist_new_node(sl);
+ tmp->next = m->next;
+ if (m->next) {
+ m->next->prev = tmp;
+ }
+ m->next = tmp;
+ tmp->prev = m;
+ tmp->up = NULL;
+ tmp->nextindex = tmp->previndex = NULL;
+ tmp->down = p;
+ if (p) {
+ p->up = tmp;
+ }
+ else {
+ /* This sets ret to the bottom-most node we are inserting */
+ ret = tmp;
+ }
+ tmp->data = data;
+ tmp->sl = sl;
+ p = tmp;
+ }
+
+ /* Now we are sure the node is inserted, grow our tree to 'nh' tall */
+ for (; sl->height < nh; sl->height++) {
+ m = skiplist_new_node(sl);
+ tmp = skiplist_new_node(sl);
+ m->up = m->prev = m->nextindex = m->previndex = NULL;
+ m->next = tmp;
+ m->down = sl->top;
+ m->data = NULL;
+ m->sl = sl;
+ if (sl->top) {
+ sl->top->up = m;
+ }
+ else {
+ sl->bottom = sl->bottomend = m;
+ }
+ sl->top = sl->topend = tmp->prev = m;
+ tmp->up = tmp->next = tmp->nextindex = tmp->previndex = NULL;
+ tmp->down = p;
+ tmp->data = data;
+ tmp->sl = sl;
+ if (p) {
+ p->up = tmp;
+ }
+ else {
+ /* This sets ret to the bottom-most node we are inserting */
+ ret = tmp;
+ }
+ p = tmp;
+ }
+ if (sl->index != NULL) {
+ /*
+ * this is an external insertion, so we must insert into each index
+ * as well
+ */
+ apr_skiplistnode *ni, *li;
+ li = ret;
+ for (p = apr_skiplist_getlist(sl->index); p; apr_skiplist_next(sl->index, &p)) {
+ apr_skiplist *sli = (apr_skiplist *)p->data;
+ ni = insert_compare(sli, ret->data, sli->compare, 1, NULL);
+ li->nextindex = ni;
+ ni->previndex = li;
+ li = ni;
+ }
+ }
+ sl->size++;
+ return ret;
+}
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert_compare(apr_skiplist *sl, void *data,
+ apr_skiplist_compare comp)
+{
+ if (!comp) {
+ return NULL;
+ }
+ return insert_compare(sl, data, comp, 0, NULL);
+}
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert(apr_skiplist *sl, void *data)
+{
+ return apr_skiplist_insert_compare(sl, data, sl->compare);
+}
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_add_compare(apr_skiplist *sl, void *data,
+ apr_skiplist_compare comp)
+{
+ if (!comp) {
+ return NULL;
+ }
+ return insert_compare(sl, data, comp, 1, NULL);
+}
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_add(apr_skiplist *sl, void *data)
+{
+ return apr_skiplist_add_compare(sl, data, sl->compare);
+}
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_replace_compare(apr_skiplist *sl,
+ void *data, apr_skiplist_freefunc myfree,
+ apr_skiplist_compare comp)
+{
+ if (!comp) {
+ return NULL;
+ }
+ return insert_compare(sl, data, comp, -1, myfree);
+}
+
+APR_DECLARE(apr_skiplistnode *) apr_skiplist_replace(apr_skiplist *sl,
+ void *data, apr_skiplist_freefunc myfree)
+{
+ return apr_skiplist_replace_compare(sl, data, myfree, sl->compare);
+}
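+
+/* Semantics sketch (illustrative): insert rejects duplicates, add keeps
+ * them (stably, after their equals), replace removes equals first:
+ *
+ *     apr_skiplist_insert(sl, &v);          // NULL if an equal exists
+ *     apr_skiplist_add(sl, &v);             // always inserts
+ *     apr_skiplist_replace(sl, &v, myfree); // at most one equal remains
+ */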
+
+#if 0
+void skiplist_print_struct(apr_skiplist * sl, char *prefix)
+{
+ apr_skiplistnode *p, *q;
+ fprintf(stderr, "Skiplist Structure (height: %d)\n", sl->height);
+ p = sl->bottom;
+ while (p) {
+ q = p;
+ fprintf(stderr, "%s", prefix);
+ while (q) {
+ fprintf(stderr, "%p ", q->data);
+ q = q->up;
+ }
+ fprintf(stderr, "\n");
+ p = p->next;
+ }
+}
+#endif
+
+static int skiplisti_remove(apr_skiplist *sl, apr_skiplistnode *m,
+ apr_skiplist_freefunc myfree)
+{
+ apr_skiplistnode *p;
+ if (!m) {
+ return 0;
+ }
+ if (m->nextindex) {
+ skiplisti_remove(m->nextindex->sl, m->nextindex, NULL);
+ }
+ while (m->up) {
+ m = m->up;
+ }
+ do {
+ p = m;
+ /* take me out of the list */
+ p->prev->next = p->next;
+ if (p->next) {
+ p->next->prev = p->prev;
+ }
+ m = m->down;
+ /* This only frees the actual data in the bottom one */
+ if (!m && myfree && p->data) {
+ myfree(p->data);
+ }
+ skiplist_put_node(sl, p);
+ } while (m);
+ sl->size--;
+ while (sl->top && sl->top->next == NULL) {
+ /* While the row is empty and we are not on the bottom row */
+ p = sl->top;
+ sl->top = sl->top->down;/* Move top down one */
+ if (sl->top) {
+ sl->top->up = NULL; /* Make it think it's the top */
+ }
+ skiplist_put_node(sl, p);
+ sl->height--;
+ }
+ if (!sl->top) {
+ sl->bottom = sl->bottomend = NULL;
+ sl->topend = NULL;
+ }
+ return skiplist_height(sl);
+}
+
+APR_DECLARE(int) apr_skiplist_remove_node(apr_skiplist *sl,
+ apr_skiplistnode *iter,
+ apr_skiplist_freefunc myfree)
+{
+ apr_skiplistnode *m = iter;
+ if (!m) {
+ return 0;
+ }
+ while (m->down) {
+ m = m->down;
+ }
+ while (m->previndex) {
+ m = m->previndex;
+ }
+ return skiplisti_remove(sl, m, myfree);
+}
+
+APR_DECLARE(int) apr_skiplist_remove_compare(apr_skiplist *sli,
+ void *data,
+ apr_skiplist_freefunc myfree, apr_skiplist_compare comp)
+{
+ apr_skiplistnode *m;
+ apr_skiplist *sl;
+ if (!comp) {
+ return 0;
+ }
+ if (comp == sli->comparek || !sli->index) {
+ sl = sli;
+ }
+ else {
+ apr_skiplist_find(sli->index, (void *)comp, &m);
+ if (!m) {
+ return 0;
+ }
+ sl = (apr_skiplist *) m->data;
+ }
+ skiplisti_find_compare(sl, data, &m, comp, 0);
+ if (!m) {
+ return 0;
+ }
+ while (m->previndex) {
+ m = m->previndex;
+ }
+ return skiplisti_remove(sl, m, myfree);
+}
+
+APR_DECLARE(int) apr_skiplist_remove(apr_skiplist *sl, void *data, apr_skiplist_freefunc myfree)
+{
+ return apr_skiplist_remove_compare(sl, data, myfree, sl->comparek);
+}
+
+APR_DECLARE(void) apr_skiplist_remove_all(apr_skiplist *sl, apr_skiplist_freefunc myfree)
+{
+ /*
+ * This must remove even the place holder nodes (bottom though top)
+ * because we specify in the API that one can free the Skiplist after
+ * making this call without memory leaks
+ */
+ apr_skiplistnode *m, *p, *u;
+ m = sl->bottom;
+ while (m) {
+ p = m->next;
+ if (myfree && p && p->data) {
+ myfree(p->data);
+ }
+ do {
+ u = m->up;
+ skiplist_put_node(sl, m);
+ m = u;
+ } while (m);
+ m = p;
+ }
+ sl->top = sl->bottom = NULL;
+ sl->topend = sl->bottomend = NULL;
+ sl->height = 0;
+ sl->size = 0;
+}
+
+APR_DECLARE(void *) apr_skiplist_pop(apr_skiplist *a, apr_skiplist_freefunc myfree)
+{
+ apr_skiplistnode *sln;
+ void *data = NULL;
+ sln = apr_skiplist_getlist(a);
+ if (sln) {
+ data = sln->data;
+ skiplisti_remove(a, sln, myfree);
+ }
+ return data;
+}
+
+APR_DECLARE(void *) apr_skiplist_peek(apr_skiplist *a)
+{
+ apr_skiplistnode *sln;
+ sln = apr_skiplist_getlist(a);
+ if (sln) {
+ return sln->data;
+ }
+ return NULL;
+}
+
+APR_DECLARE(size_t) apr_skiplist_size(const apr_skiplist *sl)
+{
+ return sl->size;
+}
+
+APR_DECLARE(int) apr_skiplist_height(const apr_skiplist *sl)
+{
+ return skiplist_height(sl);
+}
+
+APR_DECLARE(int) apr_skiplist_preheight(const apr_skiplist *sl)
+{
+ return sl->preheight;
+}
+
+APR_DECLARE(void) apr_skiplist_set_preheight(apr_skiplist *sl, int to)
+{
+ sl->preheight = (to > 0) ? to : 0;
+}
+
+static void skiplisti_destroy(void *vsl)
+{
+ apr_skiplist_destroy(vsl, NULL);
+}
+
+APR_DECLARE(void) apr_skiplist_destroy(apr_skiplist *sl, apr_skiplist_freefunc myfree)
+{
+ while (apr_skiplist_pop(sl->index, skiplisti_destroy) != NULL)
+ ;
+ apr_skiplist_remove_all(sl, myfree);
+ if (!sl->pool) {
+ while (sl->nodes_q.pos)
+ free(sl->nodes_q.data[--sl->nodes_q.pos]);
+ free(sl->nodes_q.data);
+ free(sl->stack_q.data);
+ free(sl);
+ }
+}
+
+APR_DECLARE(apr_skiplist *) apr_skiplist_merge(apr_skiplist *sl1, apr_skiplist *sl2)
+{
+ /* Check integrity! */
+ apr_skiplist temp;
+ struct apr_skiplistnode *b2;
+ if (sl1->bottomend == NULL || sl1->bottomend->prev == NULL) {
+ apr_skiplist_remove_all(sl1, NULL);
+ temp = *sl1;
+ *sl1 = *sl2;
+ *sl2 = temp;
+ /* swap them so that sl2 can be freed normally upon return. */
+ return sl1;
+ }
+ if (sl2->bottom == NULL || sl2->bottom->next == NULL) {
+ apr_skiplist_remove_all(sl2, NULL);
+ return sl1;
+ }
+ /* This is what makes it brute force... Just insert :/ */
+ b2 = apr_skiplist_getlist(sl2);
+ while (b2) {
+ apr_skiplist_insert(sl1, b2->data);
+ apr_skiplist_next(sl2, &b2);
+ }
+ apr_skiplist_remove_all(sl2, NULL);
+ return sl1;
+}
diff --git a/tables/apr_tables.c b/tables/apr_tables.c
new file mode 100644
index 0000000..9dc594c
--- /dev/null
+++ b/tables/apr_tables.c
@@ -0,0 +1,1300 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Resource allocation code... the code here is responsible for making
+ * sure that nothing leaks.
+ *
+ * rst --- 4/95 --- 6/95
+ */
+
+#include "apr_private.h"
+
+#include "apr_general.h"
+#include "apr_pools.h"
+#include "apr_tables.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#if APR_HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#if APR_HAVE_STRING_H
+#include <string.h>
+#endif
+#if APR_HAVE_STRINGS_H
+#include <strings.h>
+#endif
+
+#if (APR_POOL_DEBUG || defined(MAKE_TABLE_PROFILE)) && APR_HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+/*****************************************************************
+ * This file contains array and apr_table_t functions only.
+ */
+
+/*****************************************************************
+ *
+ * The 'array' functions...
+ */
+
+static void make_array_core(apr_array_header_t *res, apr_pool_t *p,
+ int nelts, int elt_size, int clear)
+{
+ /*
+ * Assure sanity if someone asks for an
+ * array of zero elts.
+ */
+ if (nelts < 1) {
+ nelts = 1;
+ }
+
+ if (clear) {
+ res->elts = apr_pcalloc(p, nelts * elt_size);
+ }
+ else {
+ res->elts = apr_palloc(p, nelts * elt_size);
+ }
+
+ res->pool = p;
+ res->elt_size = elt_size;
+ res->nelts = 0; /* No active elements yet... */
+ res->nalloc = nelts; /* ...but this many allocated */
+}
+
+APR_DECLARE(int) apr_is_empty_array(const apr_array_header_t *a)
+{
+ return ((a == NULL) || (a->nelts == 0));
+}
+
+APR_DECLARE(apr_array_header_t *) apr_array_make(apr_pool_t *p,
+ int nelts, int elt_size)
+{
+ apr_array_header_t *res;
+
+ res = (apr_array_header_t *) apr_palloc(p, sizeof(apr_array_header_t));
+ make_array_core(res, p, nelts, elt_size, 1);
+ return res;
+}
+
+APR_DECLARE(void) apr_array_clear(apr_array_header_t *arr)
+{
+ arr->nelts = 0;
+}
+
+APR_DECLARE(void *) apr_array_pop(apr_array_header_t *arr)
+{
+ if (apr_is_empty_array(arr)) {
+ return NULL;
+ }
+
+ return arr->elts + (arr->elt_size * (--arr->nelts));
+}
+
+APR_DECLARE(void *) apr_array_push(apr_array_header_t *arr)
+{
+ if (arr->nelts == arr->nalloc) {
+ int new_size = (arr->nalloc <= 0) ? 1 : arr->nalloc * 2;
+ char *new_data;
+
+ new_data = apr_palloc(arr->pool, arr->elt_size * new_size);
+
+ memcpy(new_data, arr->elts, arr->nalloc * arr->elt_size);
+ memset(new_data + arr->nalloc * arr->elt_size, 0,
+ arr->elt_size * (new_size - arr->nalloc));
+ arr->elts = new_data;
+ arr->nalloc = new_size;
+ }
+
+ ++arr->nelts;
+ return arr->elts + (arr->elt_size * (arr->nelts - 1));
+}
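+
+/* Usage sketch (illustrative), assuming a pool `p`:
+ *
+ *     apr_array_header_t *a = apr_array_make(p, 4, sizeof(int));
+ *     *(int *)apr_array_push(a) = 42;  // doubles nalloc when full
+ *     int *last = apr_array_pop(a);    // NULL if the array is empty
+ */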
+
+static void *apr_array_push_noclear(apr_array_header_t *arr)
+{
+ if (arr->nelts == arr->nalloc) {
+ int new_size = (arr->nalloc <= 0) ? 1 : arr->nalloc * 2;
+ char *new_data;
+
+ new_data = apr_palloc(arr->pool, arr->elt_size * new_size);
+
+ memcpy(new_data, arr->elts, arr->nalloc * arr->elt_size);
+ arr->elts = new_data;
+ arr->nalloc = new_size;
+ }
+
+ ++arr->nelts;
+ return arr->elts + (arr->elt_size * (arr->nelts - 1));
+}
+
+APR_DECLARE(void) apr_array_cat(apr_array_header_t *dst,
+ const apr_array_header_t *src)
+{
+ int elt_size = dst->elt_size;
+
+ if (dst->nelts + src->nelts > dst->nalloc) {
+ int new_size = (dst->nalloc <= 0) ? 1 : dst->nalloc * 2;
+ char *new_data;
+
+ while (dst->nelts + src->nelts > new_size) {
+ new_size *= 2;
+ }
+
+ new_data = apr_pcalloc(dst->pool, elt_size * new_size);
+ memcpy(new_data, dst->elts, dst->nalloc * elt_size);
+
+ dst->elts = new_data;
+ dst->nalloc = new_size;
+ }
+
+ memcpy(dst->elts + dst->nelts * elt_size, src->elts,
+ elt_size * src->nelts);
+ dst->nelts += src->nelts;
+}
+
+APR_DECLARE(apr_array_header_t *) apr_array_copy(apr_pool_t *p,
+ const apr_array_header_t *arr)
+{
+ apr_array_header_t *res =
+ (apr_array_header_t *) apr_palloc(p, sizeof(apr_array_header_t));
+ make_array_core(res, p, arr->nalloc, arr->elt_size, 0);
+
+ memcpy(res->elts, arr->elts, arr->elt_size * arr->nelts);
+ res->nelts = arr->nelts;
+ memset(res->elts + res->elt_size * res->nelts, 0,
+ res->elt_size * (res->nalloc - res->nelts));
+ return res;
+}
+
+/* This cute function copies the array header *only*, but arranges
+ * for the data section to be copied on the first push or arraycat.
+ * It's useful when the elements of the array being copied are
+ * read only, but new stuff *might* get added on the end; we have the
+ * overhead of the full copy only where it is really needed.
+ */
+
+static APR_INLINE void copy_array_hdr_core(apr_array_header_t *res,
+ const apr_array_header_t *arr)
+{
+ res->elts = arr->elts;
+ res->elt_size = arr->elt_size;
+ res->nelts = arr->nelts;
+ res->nalloc = arr->nelts; /* Force overflow on push */
+}
+
+APR_DECLARE(apr_array_header_t *)
+ apr_array_copy_hdr(apr_pool_t *p,
+ const apr_array_header_t *arr)
+{
+ apr_array_header_t *res;
+
+ res = (apr_array_header_t *) apr_palloc(p, sizeof(apr_array_header_t));
+ res->pool = p;
+ copy_array_hdr_core(res, arr);
+ return res;
+}
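+
+/* Sketch (illustrative): the header copy shares the element data, and
+ * because nalloc is clamped to nelts, the first push on the copy
+ * overflows and reallocates, leaving the original untouched:
+ *
+ *     apr_array_header_t *snap = apr_array_copy_hdr(p, arr);
+ *     *(int *)apr_array_push(snap) = 7;  // copies data now; arr unchanged
+ */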
+
+/* The above is used here to avoid consing multiple new array bodies... */
+
+APR_DECLARE(apr_array_header_t *)
+ apr_array_append(apr_pool_t *p,
+ const apr_array_header_t *first,
+ const apr_array_header_t *second)
+{
+ apr_array_header_t *res = apr_array_copy_hdr(p, first);
+
+ apr_array_cat(res, second);
+ return res;
+}
+
+/* apr_array_pstrcat generates a new string from the apr_pool_t containing
+ * the concatenated sequence of substrings referenced as elements within
+ * the array. The string will be empty if all substrings are empty or null,
+ * or if there are no elements in the array.
+ * If sep is non-NUL, it will be inserted between elements as a separator.
+ */
+APR_DECLARE(char *) apr_array_pstrcat(apr_pool_t *p,
+ const apr_array_header_t *arr,
+ const char sep)
+{
+ char *cp, *res, **strpp;
+ apr_size_t len;
+ int i;
+
+ if (arr->nelts <= 0 || arr->elts == NULL) { /* Empty table? */
+ return (char *) apr_pcalloc(p, 1);
+ }
+
+ /* Pass one --- find length of required string */
+
+ len = 0;
+ for (i = 0, strpp = (char **) arr->elts; ; ++strpp) {
+ if (strpp && *strpp != NULL) {
+ len += strlen(*strpp);
+ }
+ if (++i >= arr->nelts) {
+ break;
+ }
+ if (sep) {
+ ++len;
+ }
+ }
+
+ /* Allocate the required string */
+
+ res = (char *) apr_palloc(p, len + 1);
+ cp = res;
+
+ /* Pass two --- copy the argument strings into the result space */
+
+ for (i = 0, strpp = (char **) arr->elts; ; ++strpp) {
+ if (strpp && *strpp != NULL) {
+ len = strlen(*strpp);
+ memcpy(cp, *strpp, len);
+ cp += len;
+ }
+ if (++i >= arr->nelts) {
+ break;
+ }
+ if (sep) {
+ *cp++ = sep;
+ }
+ }
+
+ *cp = '\0';
+
+ /* Return the result string */
+
+ return res;
+}
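+
+/* Sketch (illustrative): joining an array of strings with a separator:
+ *
+ *     apr_array_header_t *a = apr_array_make(p, 2, sizeof(char *));
+ *     *(const char **)apr_array_push(a) = "foo";
+ *     *(const char **)apr_array_push(a) = "bar";
+ *     char *s = apr_array_pstrcat(p, a, ',');  // "foo,bar"
+ */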
+
+
+/*****************************************************************
+ *
+ * The "table" functions.
+ */
+
+#if APR_CHARSET_EBCDIC
+#define CASE_MASK 0xbfbfbfbf
+#else
+#define CASE_MASK 0xdfdfdfdf
+#endif
+
+#define TABLE_HASH_SIZE 32
+#define TABLE_INDEX_MASK 0x1f
+#define TABLE_HASH(key) (TABLE_INDEX_MASK & *(unsigned char *)(key))
+#define TABLE_INDEX_IS_INITIALIZED(t, i) ((t)->index_initialized & (1u << (i)))
+#define TABLE_SET_INDEX_INITIALIZED(t, i) ((t)->index_initialized |= (1u << (i)))
+
+/* Compute the "checksum" for a key, consisting of the first
+ * 4 bytes, normalized for case-insensitivity and packed into
+ * an int...this checksum allows us to do a single integer
+ * comparison as a fast check to determine whether we can
+ * skip a strcasecmp
+ */
+#define COMPUTE_KEY_CHECKSUM(key, checksum) \
+{ \
+ const char *k = (key); \
+ apr_uint32_t c = (apr_uint32_t)*k; \
+ (checksum) = c; \
+ (checksum) <<= 8; \
+ if (c) { \
+ c = (apr_uint32_t)*++k; \
+ checksum |= c; \
+ } \
+ (checksum) <<= 8; \
+ if (c) { \
+ c = (apr_uint32_t)*++k; \
+ checksum |= c; \
+ } \
+ (checksum) <<= 8; \
+ if (c) { \
+ c = (apr_uint32_t)*++k; \
+ checksum |= c; \
+ } \
+ checksum &= CASE_MASK; \
+}
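+
+/* Worked example (illustrative): on an ASCII build, the key "Host"
+ * packs its first four bytes 0x48 0x6f 0x73 0x74 into 0x486f7374;
+ * masking with CASE_MASK (0xdfdfdfdf) clears bit 5 of each byte,
+ * upper-casing ASCII letters, so the checksum is 0x484f5354 ("HOST").
+ */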
+
+/** The opaque string-content table type */
+struct apr_table_t {
+ /* This has to be first to promote backwards compatibility with
+ * older modules which cast an apr_table_t * to an apr_array_header_t *...
+ * they should use the apr_table_elts() function for most of the
+ * cases they do this for.
+ */
+ /** The underlying array for the table */
+ apr_array_header_t a;
+#ifdef MAKE_TABLE_PROFILE
+ /** Who created the array. */
+ void *creator;
+#endif
+ /* An index to speed up table lookups. The way this works is:
+ * - Hash the key into the index:
+ * - index_first[TABLE_HASH(key)] is the offset within
+ * the table of the first entry with that key
+ * - index_last[TABLE_HASH(key)] is the offset within
+ * the table of the last entry with that key
+ * - If (and only if) there is no entry in the table whose
+ * key hashes to index element i, then the i'th bit
+ * of index_initialized will be zero. (Check this before
+ * trying to use index_first[i] or index_last[i]!)
+ */
+ apr_uint32_t index_initialized;
+ int index_first[TABLE_HASH_SIZE];
+ int index_last[TABLE_HASH_SIZE];
+};
+
+/* keep state for apr_table_getm() */
+typedef struct
+{
+ apr_pool_t *p;
+ const char *first;
+ apr_array_header_t *merged;
+} table_getm_t;
+
+/*
+ * NOTICE: if you tweak this you should look at is_empty_table()
+ * and table_elts() in alloc.h
+ */
+#ifdef MAKE_TABLE_PROFILE
+static apr_table_entry_t *do_table_push(const char *func, apr_table_t *t)
+{
+ if (t->a.nelts == t->a.nalloc) {
+ fprintf(stderr, "%s: table created by %p hit limit of %u\n",
+ func ? func : "table_push", t->creator, t->a.nalloc);
+ }
+ return (apr_table_entry_t *) apr_array_push_noclear(&t->a);
+}
+#if defined(__GNUC__) && __GNUC__ >= 2
+#define table_push(t) do_table_push(__FUNCTION__, t)
+#else
+#define table_push(t) do_table_push(NULL, t)
+#endif
+#else /* MAKE_TABLE_PROFILE */
+#define table_push(t) ((apr_table_entry_t *) apr_array_push_noclear(&(t)->a))
+#endif /* MAKE_TABLE_PROFILE */
+
+APR_DECLARE(const apr_array_header_t *) apr_table_elts(const apr_table_t *t)
+{
+ return (const apr_array_header_t *)t;
+}
+
+APR_DECLARE(int) apr_is_empty_table(const apr_table_t *t)
+{
+ return ((t == NULL) || (t->a.nelts == 0));
+}
+
+APR_DECLARE(apr_table_t *) apr_table_make(apr_pool_t *p, int nelts)
+{
+ apr_table_t *t = apr_palloc(p, sizeof(apr_table_t));
+
+ make_array_core(&t->a, p, nelts, sizeof(apr_table_entry_t), 0);
+#ifdef MAKE_TABLE_PROFILE
+ t->creator = __builtin_return_address(0);
+#endif
+ t->index_initialized = 0;
+ return t;
+}
+
+APR_DECLARE(apr_table_t *) apr_table_copy(apr_pool_t *p, const apr_table_t *t)
+{
+ apr_table_t *new = apr_palloc(p, sizeof(apr_table_t));
+
+#if APR_POOL_DEBUG
+ /* we don't copy keys and values, so it's necessary that t->a.pool
+ * have a life span at least as long as p
+ */
+ if (!apr_pool_is_ancestor(t->a.pool, p)) {
+ fprintf(stderr, "apr_table_copy: t's pool is not an ancestor of p\n");
+ abort();
+ }
+#endif
+ make_array_core(&new->a, p, t->a.nalloc, sizeof(apr_table_entry_t), 0);
+ memcpy(new->a.elts, t->a.elts, t->a.nelts * sizeof(apr_table_entry_t));
+ new->a.nelts = t->a.nelts;
+ memcpy(new->index_first, t->index_first, sizeof(int) * TABLE_HASH_SIZE);
+ memcpy(new->index_last, t->index_last, sizeof(int) * TABLE_HASH_SIZE);
+ new->index_initialized = t->index_initialized;
+ return new;
+}
+
+APR_DECLARE(apr_table_t *) apr_table_clone(apr_pool_t *p, const apr_table_t *t)
+{
+ const apr_array_header_t *array = apr_table_elts(t);
+ apr_table_entry_t *elts = (apr_table_entry_t *) array->elts;
+ apr_table_t *new = apr_table_make(p, array->nelts);
+ int i;
+
+ for (i = 0; i < array->nelts; i++) {
+ apr_table_add(new, elts[i].key, elts[i].val);
+ }
+
+ return new;
+}
+
+static void table_reindex(apr_table_t *t)
+{
+ int i;
+ int hash;
+ apr_table_entry_t *next_elt = (apr_table_entry_t *) t->a.elts;
+
+ t->index_initialized = 0;
+ for (i = 0; i < t->a.nelts; i++, next_elt++) {
+ hash = TABLE_HASH(next_elt->key);
+ t->index_last[hash] = i;
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = i;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ }
+ }
+}
+
+APR_DECLARE(void) apr_table_clear(apr_table_t *t)
+{
+ t->a.nelts = 0;
+ t->index_initialized = 0;
+}
+
+APR_DECLARE(const char *) apr_table_get(const apr_table_t *t, const char *key)
+{
+ apr_table_entry_t *next_elt;
+ apr_table_entry_t *end_elt;
+ apr_uint32_t checksum;
+ int hash;
+
+ if (key == NULL) {
+ return NULL;
+ }
+
+ hash = TABLE_HASH(key);
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ return NULL;
+ }
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ next_elt = ((apr_table_entry_t *) t->a.elts) + t->index_first[hash];
+ end_elt = ((apr_table_entry_t *) t->a.elts) + t->index_last[hash];
+
+ for (; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+ return next_elt->val;
+ }
+ }
+
+ return NULL;
+}
+
+APR_DECLARE(void) apr_table_set(apr_table_t *t, const char *key,
+ const char *val)
+{
+ apr_table_entry_t *next_elt;
+ apr_table_entry_t *end_elt;
+ apr_table_entry_t *table_end;
+ apr_uint32_t checksum;
+ int hash;
+
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ hash = TABLE_HASH(key);
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = t->a.nelts;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ goto add_new_elt;
+ }
+ next_elt = ((apr_table_entry_t *) t->a.elts) + t->index_first[hash];
+ end_elt = ((apr_table_entry_t *) t->a.elts) + t->index_last[hash];
+ table_end = ((apr_table_entry_t *) t->a.elts) + t->a.nelts;
+
+ for (; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+
+ /* Found an existing entry with the same key, so overwrite it */
+
+ int must_reindex = 0;
+ apr_table_entry_t *dst_elt = NULL;
+
+ next_elt->val = apr_pstrdup(t->a.pool, val);
+
+ /* Remove any other instances of this key */
+ for (next_elt++; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+ t->a.nelts--;
+ if (!dst_elt) {
+ dst_elt = next_elt;
+ }
+ }
+ else if (dst_elt) {
+ *dst_elt++ = *next_elt;
+ must_reindex = 1;
+ }
+ }
+
+ /* If we've removed anything, shift over the remainder
+ * of the table (note that the previous loop didn't
+ * run to the end of the table, just to the last match
+ * for the index)
+ */
+ if (dst_elt) {
+ for (; next_elt < table_end; next_elt++) {
+ *dst_elt++ = *next_elt;
+ }
+ must_reindex = 1;
+ }
+ if (must_reindex) {
+ table_reindex(t);
+ }
+ return;
+ }
+ }
+
+add_new_elt:
+ t->index_last[hash] = t->a.nelts;
+ next_elt = (apr_table_entry_t *) table_push(t);
+ next_elt->key = apr_pstrdup(t->a.pool, key);
+ next_elt->val = apr_pstrdup(t->a.pool, val);
+ next_elt->key_checksum = checksum;
+}
+
+APR_DECLARE(void) apr_table_setn(apr_table_t *t, const char *key,
+ const char *val)
+{
+ apr_table_entry_t *next_elt;
+ apr_table_entry_t *end_elt;
+ apr_table_entry_t *table_end;
+ apr_uint32_t checksum;
+ int hash;
+
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ hash = TABLE_HASH(key);
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = t->a.nelts;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ goto add_new_elt;
+ }
+ next_elt = ((apr_table_entry_t *) t->a.elts) + t->index_first[hash];
+ end_elt = ((apr_table_entry_t *) t->a.elts) + t->index_last[hash];
+ table_end = ((apr_table_entry_t *) t->a.elts) + t->a.nelts;
+
+ for (; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+
+ /* Found an existing entry with the same key, so overwrite it */
+
+ int must_reindex = 0;
+ apr_table_entry_t *dst_elt = NULL;
+
+ next_elt->val = (char *)val;
+
+ /* Remove any other instances of this key */
+ for (next_elt++; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+ t->a.nelts--;
+ if (!dst_elt) {
+ dst_elt = next_elt;
+ }
+ }
+ else if (dst_elt) {
+ *dst_elt++ = *next_elt;
+ must_reindex = 1;
+ }
+ }
+
+ /* If we've removed anything, shift over the remainder
+ * of the table (note that the previous loop didn't
+ * run to the end of the table, just to the last match
+ * for the index)
+ */
+ if (dst_elt) {
+ for (; next_elt < table_end; next_elt++) {
+ *dst_elt++ = *next_elt;
+ }
+ must_reindex = 1;
+ }
+ if (must_reindex) {
+ table_reindex(t);
+ }
+ return;
+ }
+ }
+
+add_new_elt:
+ t->index_last[hash] = t->a.nelts;
+ next_elt = (apr_table_entry_t *) table_push(t);
+ next_elt->key = (char *)key;
+ next_elt->val = (char *)val;
+ next_elt->key_checksum = checksum;
+}
+
+APR_DECLARE(void) apr_table_unset(apr_table_t *t, const char *key)
+{
+ apr_table_entry_t *next_elt;
+ apr_table_entry_t *end_elt;
+ apr_table_entry_t *dst_elt;
+ apr_uint32_t checksum;
+ int hash;
+ int must_reindex;
+
+ hash = TABLE_HASH(key);
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ return;
+ }
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ next_elt = ((apr_table_entry_t *) t->a.elts) + t->index_first[hash];
+ end_elt = ((apr_table_entry_t *) t->a.elts) + t->index_last[hash];
+ must_reindex = 0;
+ for (; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+
+ /* Found a match: remove this entry, plus any additional
+ * matches for the same key that might follow
+ */
+ apr_table_entry_t *table_end = ((apr_table_entry_t *) t->a.elts) +
+ t->a.nelts;
+ t->a.nelts--;
+ dst_elt = next_elt;
+ for (next_elt++; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+ t->a.nelts--;
+ }
+ else {
+ *dst_elt++ = *next_elt;
+ }
+ }
+
+ /* Shift over the remainder of the table (note that
+ * the previous loop didn't run to the end of the table,
+ * just to the last match for the index)
+ */
+ for (; next_elt < table_end; next_elt++) {
+ *dst_elt++ = *next_elt;
+ }
+ must_reindex = 1;
+ break;
+ }
+ }
+ if (must_reindex) {
+ table_reindex(t);
+ }
+}
+
+APR_DECLARE(void) apr_table_merge(apr_table_t *t, const char *key,
+ const char *val)
+{
+ apr_table_entry_t *next_elt;
+ apr_table_entry_t *end_elt;
+ apr_uint32_t checksum;
+ int hash;
+
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ hash = TABLE_HASH(key);
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = t->a.nelts;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ goto add_new_elt;
+ }
+ next_elt = ((apr_table_entry_t *) t->a.elts) + t->index_first[hash];
+ end_elt = ((apr_table_entry_t *) t->a.elts) + t->index_last[hash];
+
+ for (; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+
+ /* Found an existing entry with the same key, so merge with it */
+ next_elt->val = apr_pstrcat(t->a.pool, next_elt->val, ", ",
+ val, NULL);
+ return;
+ }
+ }
+
+add_new_elt:
+ t->index_last[hash] = t->a.nelts;
+ next_elt = (apr_table_entry_t *) table_push(t);
+ next_elt->key = apr_pstrdup(t->a.pool, key);
+ next_elt->val = apr_pstrdup(t->a.pool, val);
+ next_elt->key_checksum = checksum;
+}
+
+APR_DECLARE(void) apr_table_mergen(apr_table_t *t, const char *key,
+ const char *val)
+{
+ apr_table_entry_t *next_elt;
+ apr_table_entry_t *end_elt;
+ apr_uint32_t checksum;
+ int hash;
+
+#if APR_POOL_DEBUG
+ {
+ apr_pool_t *pool;
+ pool = apr_pool_find(key);
+ if ((pool != (apr_pool_t *)key)
+ && (!apr_pool_is_ancestor(pool, t->a.pool))) {
+ fprintf(stderr, "apr_table_mergen: key not in ancestor pool of t\n");
+ abort();
+ }
+ pool = apr_pool_find(val);
+ if ((pool != (apr_pool_t *)val)
+ && (!apr_pool_is_ancestor(pool, t->a.pool))) {
+ fprintf(stderr, "apr_table_mergen: val not in ancestor pool of t\n");
+ abort();
+ }
+ }
+#endif
+
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ hash = TABLE_HASH(key);
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = t->a.nelts;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ goto add_new_elt;
+ }
+ next_elt = ((apr_table_entry_t *) t->a.elts) + t->index_first[hash];
+ end_elt = ((apr_table_entry_t *) t->a.elts) + t->index_last[hash];
+
+ for (; next_elt <= end_elt; next_elt++) {
+ if ((checksum == next_elt->key_checksum) &&
+ !strcasecmp(next_elt->key, key)) {
+
+ /* Found an existing entry with the same key, so merge with it */
+ next_elt->val = apr_pstrcat(t->a.pool, next_elt->val, ", ",
+ val, NULL);
+ return;
+ }
+ }
+
+add_new_elt:
+ t->index_last[hash] = t->a.nelts;
+ next_elt = (apr_table_entry_t *) table_push(t);
+ next_elt->key = (char *)key;
+ next_elt->val = (char *)val;
+ next_elt->key_checksum = checksum;
+}
+
+APR_DECLARE(void) apr_table_add(apr_table_t *t, const char *key,
+ const char *val)
+{
+ apr_table_entry_t *elts;
+ apr_uint32_t checksum;
+ int hash;
+
+ hash = TABLE_HASH(key);
+ t->index_last[hash] = t->a.nelts;
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = t->a.nelts;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ }
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ elts = (apr_table_entry_t *) table_push(t);
+ elts->key = apr_pstrdup(t->a.pool, key);
+ elts->val = apr_pstrdup(t->a.pool, val);
+ elts->key_checksum = checksum;
+}
+
+APR_DECLARE(void) apr_table_addn(apr_table_t *t, const char *key,
+ const char *val)
+{
+ apr_table_entry_t *elts;
+ apr_uint32_t checksum;
+ int hash;
+
+#if APR_POOL_DEBUG
+ {
+ if (!apr_pool_is_ancestor(apr_pool_find(key), t->a.pool)) {
+ fprintf(stderr, "apr_table_addn: key not in ancestor pool of t\n");
+ abort();
+ }
+ if (!apr_pool_is_ancestor(apr_pool_find(val), t->a.pool)) {
+ fprintf(stderr, "apr_table_addn: val not in ancestor pool of t\n");
+ abort();
+ }
+ }
+#endif
+
+ hash = TABLE_HASH(key);
+ t->index_last[hash] = t->a.nelts;
+ if (!TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ t->index_first[hash] = t->a.nelts;
+ TABLE_SET_INDEX_INITIALIZED(t, hash);
+ }
+ COMPUTE_KEY_CHECKSUM(key, checksum);
+ elts = (apr_table_entry_t *) table_push(t);
+ elts->key = (char *)key;
+ elts->val = (char *)val;
+ elts->key_checksum = checksum;
+}
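+
+/* A short sketch of the add/addn split (not part of the original file;
+ * assumes an existing pool "p"): apr_table_add() copies key and value
+ * into the table's pool, while apr_table_addn() stores the caller's
+ * pointers directly, so they must live at least as long as the table:
+ *
+ *     apr_table_t *t = apr_table_make(p, 4);
+ *     char buf[32];
+ *     apr_snprintf(buf, sizeof(buf), "%d", 42);
+ *     apr_table_add(t, "X-Count", buf);     (safe: buf is copied)
+ *     apr_table_addn(t, "X-Fixed", "1");    (safe: literals are static)
+ *
+ * The same convention applies to apr_table_mergen() above.
+ */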
+
+APR_DECLARE(apr_table_t *) apr_table_overlay(apr_pool_t *p,
+ const apr_table_t *overlay,
+ const apr_table_t *base)
+{
+ apr_table_t *res;
+
+#if APR_POOL_DEBUG
+    /* We don't copy keys and values, so overlay->a.pool and base->a.pool
+     * must have a lifetime at least as long as p's.
+     */
+ if (!apr_pool_is_ancestor(overlay->a.pool, p)) {
+ fprintf(stderr,
+ "apr_table_overlay: overlay's pool is not an ancestor of p\n");
+ abort();
+ }
+ if (!apr_pool_is_ancestor(base->a.pool, p)) {
+ fprintf(stderr,
+ "apr_table_overlay: base's pool is not an ancestor of p\n");
+ abort();
+ }
+#endif
+
+ res = apr_palloc(p, sizeof(apr_table_t));
+ /* behave like append_arrays */
+ res->a.pool = p;
+ copy_array_hdr_core(&res->a, &overlay->a);
+ apr_array_cat(&res->a, &base->a);
+ table_reindex(res);
+ return res;
+}
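+
+/* A minimal sketch of the overlay semantics (not part of the original
+ * file): the result holds overlay's entries first, then base's, with
+ * no copying of keys or values:
+ *
+ *     apr_table_t *res = apr_table_overlay(p, overlay, base);
+ *
+ * Where a key occurs in both tables, apr_table_get() on the result
+ * returns overlay's value, since it returns the first match.
+ */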
+
+/* And now for something completely abstract ...
+ *
+ * For each key given as a vararg:
+ * run the function pointed to as
+ * int comp(void *r, char *key, char *value);
+ * on each valid key-value pair in the apr_table_t t that matches the vararg key,
+ * or once for every valid key-value pair if the vararg list is empty,
+ * until the function returns false (0) or we finish the table.
+ *
+ * Note that we restart the traversal for each vararg, which means that
+ * duplicate varargs will result in multiple executions of the function
+ * for each matching key. Note also that if the vararg list is empty,
+ * only one traversal is made, and it is cut short if comp returns 0.
+ *
+ * Note that the table_get and table_merge functions assume that each key in
+ * the apr_table_t is unique (i.e., no two entries with the same key). This
+ * function does not make that assumption, since it (unfortunately) isn't
+ * true for some of Apache's tables.
+ *
+ * Note that rec is simply passed on to the comp function, so that the
+ * caller can pass additional info for the task.
+ *
+ * ADDENDUM for apr_table_vdo():
+ *
+ * The caching API will allow a user to walk the header values:
+ *
+ * apr_status_t apr_cache_el_header_walk(apr_cache_el *el,
+ * int (*comp)(void *, const char *, const char *), void *rec, ...);
+ *
+ * So it can take "...", but from there I use a callback that takes a va_list:
+ *
+ * apr_status_t (*cache_el_header_walk)(apr_cache_el *el,
+ * int (*comp)(void *, const char *, const char *), void *rec, va_list);
+ *
+ * This passes the "..." arguments down to the actual module that walks
+ * the headers; in the file case that module just uses an apr_table, and
+ * rather than reimplement apr_table_do (which IMHO would be bad) I just
+ * call it with the va_list. mod_shmem_cache doesn't need this since it
+ * can't use apr_table's, but mod_file_cache should (though a good hash
+ * would be better, that's a different issue :).
+ *
+ * So, to keep mod_file_cache easy to maintain, supporting a va_list
+ * here is a good thing.
+ */
+APR_DECLARE_NONSTD(int) apr_table_do(apr_table_do_callback_fn_t *comp,
+ void *rec, const apr_table_t *t, ...)
+{
+ int rv;
+
+ va_list vp;
+ va_start(vp, t);
+ rv = apr_table_vdo(comp, rec, t, vp);
+ va_end(vp);
+
+ return rv;
+}
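+
+/* A minimal sketch of the callback pattern described above (the callback
+ * name is illustrative, not part of this file):
+ *
+ *     static int count_header(void *rec, const char *key, const char *val)
+ *     {
+ *         int *n = rec;
+ *         (*n)++;
+ *         return 1;    (keep going; returning 0 stops the current scan)
+ *     }
+ *
+ *     int n = 0;
+ *     apr_table_do(count_header, &n, t, "Set-Cookie", NULL);   (one key)
+ *     apr_table_do(count_header, &n, t, NULL);                 (whole table)
+ */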
+
+/* XXX: do the semantics of this routine make any sense? Right now,
+ * if the caller passed in a non-empty va_list of keys to search for,
+ * the "early termination" facility only terminates on *that* key; other
+ * keys will continue to be processed. Note that this only has any effect
+ * at all if there are multiple entries in the table with the same key,
+ * otherwise the called function can never effectively early-terminate
+ * this function, as the zero return value is effectively ignored.
+ *
+ * Note also that this behavior is at odds with the behavior seen if an
+ * empty va_list is passed in -- in that case, a zero return value terminates
+ * the entire apr_table_vdo (which is what I think should happen in
+ * both cases).
+ *
+ * If nobody objects soon, I'm going to change the order of the nested
+ * loops in this function so that any zero return value from the (*comp)
+ * function will cause a full termination of apr_table_vdo. I'm hesitant
+ * at the moment because these (funky) semantics have been around for a
+ * very long time, and although Apache doesn't seem to use them at all,
+ * some third-party vendor might. I can only think of one possible reason
+ * the existing semantics would make any sense, and it's very Apache-centric,
+ * which is this: if (*comp) is looking for matches of a particular
+ * substring in request headers (let's say it's looking for a particular
+ * cookie name in the Set-Cookie headers), then maybe it wants to be
+ * able to stop searching early as soon as it finds that one and move
+ * on to the next key. That's only an optimization of course, but changing
+ * the behavior of this function would mean that any code that tried
+ * to do that would stop working right.
+ *
+ * Sigh. --JCW, 06/28/02
+ */
+APR_DECLARE(int) apr_table_vdo(apr_table_do_callback_fn_t *comp,
+ void *rec, const apr_table_t *t, va_list vp)
+{
+ char *argp;
+ apr_table_entry_t *elts = (apr_table_entry_t *) t->a.elts;
+ int vdorv = 1;
+
+ argp = va_arg(vp, char *);
+ do {
+ int rv = 1, i;
+ if (argp) {
+ /* Scan for entries that match the next key */
+ int hash = TABLE_HASH(argp);
+ if (TABLE_INDEX_IS_INITIALIZED(t, hash)) {
+ apr_uint32_t checksum;
+ COMPUTE_KEY_CHECKSUM(argp, checksum);
+ for (i = t->index_first[hash];
+ rv && (i <= t->index_last[hash]); ++i) {
+ if (elts[i].key && (checksum == elts[i].key_checksum) &&
+ !strcasecmp(elts[i].key, argp)) {
+ rv = (*comp) (rec, elts[i].key, elts[i].val);
+ }
+ }
+ }
+ }
+ else {
+ /* Scan the entire table */
+ for (i = 0; rv && (i < t->a.nelts); ++i) {
+ if (elts[i].key) {
+ rv = (*comp) (rec, elts[i].key, elts[i].val);
+ }
+ }
+ }
+ if (rv == 0) {
+ vdorv = 0;
+ }
+ } while (argp && ((argp = va_arg(vp, char *)) != NULL));
+
+ return vdorv;
+}
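+
+/* A small illustration of the quirk discussed above (the key names and
+ * callback are illustrative): with a callback that returns 0 on its
+ * first call,
+ *
+ *     apr_table_do(stop_after_first, rec, t, "K1", "K2", NULL);
+ *
+ * stops scanning "K1" immediately but still goes on to scan "K2". Only
+ * with an empty key list does a zero return end the whole traversal;
+ * in both cases the overall return value becomes 0.
+ */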
+
+static apr_table_entry_t **table_mergesort(apr_pool_t *pool,
+ apr_table_entry_t **values,
+ apr_size_t n)
+{
+    /* Bottom-up mergesort, based on the design in Sedgewick's
+     * "Algorithms in C," chapter 8
+     */
+ apr_table_entry_t **values_tmp =
+ (apr_table_entry_t **)apr_palloc(pool, n * sizeof(apr_table_entry_t*));
+ apr_size_t i;
+ apr_size_t blocksize;
+
+ /* First pass: sort pairs of elements (blocksize=1) */
+ for (i = 0; i + 1 < n; i += 2) {
+ if (strcasecmp(values[i]->key, values[i + 1]->key) > 0) {
+ apr_table_entry_t *swap = values[i];
+ values[i] = values[i + 1];
+ values[i + 1] = swap;
+ }
+ }
+
+ /* Merge successively larger blocks */
+ blocksize = 2;
+ while (blocksize < n) {
+ apr_table_entry_t **dst = values_tmp;
+ apr_size_t next_start;
+ apr_table_entry_t **swap;
+
+        /* Merge consecutive pairs of blocks of the current blocksize.
+         * Within each block, the elements are already in sorted order
+         * from the previous pass.
+         */
+ for (next_start = 0; next_start + blocksize < n;
+ next_start += (blocksize + blocksize)) {
+
+ apr_size_t block1_start = next_start;
+ apr_size_t block2_start = block1_start + blocksize;
+ apr_size_t block1_end = block2_start;
+ apr_size_t block2_end = block2_start + blocksize;
+ if (block2_end > n) {
+ /* The last block may be smaller than blocksize */
+ block2_end = n;
+ }
+ for (;;) {
+
+ /* Merge the next two blocks:
+ * Pick the smaller of the next element from
+ * block 1 and the next element from block 2.
+ * Once either of the blocks is emptied, copy
+ * over all the remaining elements from the
+ * other block
+ */
+ if (block1_start == block1_end) {
+ for (; block2_start < block2_end; block2_start++) {
+ *dst++ = values[block2_start];
+ }
+ break;
+ }
+ else if (block2_start == block2_end) {
+ for (; block1_start < block1_end; block1_start++) {
+ *dst++ = values[block1_start];
+ }
+ break;
+ }
+ if (strcasecmp(values[block1_start]->key,
+ values[block2_start]->key) > 0) {
+ *dst++ = values[block2_start++];
+ }
+ else {
+ *dst++ = values[block1_start++];
+ }
+ }
+ }
+
+ /* If n is not a multiple of 2*blocksize, some elements
+ * will be left over at the end of the array.
+ */
+ for (i = dst - values_tmp; i < n; i++) {
+ values_tmp[i] = values[i];
+ }
+
+ /* The output array of this pass becomes the input
+ * array of the next pass, and vice versa
+ */
+ swap = values_tmp;
+ values_tmp = values;
+ values = swap;
+
+ blocksize += blocksize;
+ }
+
+ return values;
+}
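+
+/* A worked trace of the bottom-up merge above (keys only, compared
+ * case-insensitively):
+ *
+ *     input:        D  b  C  a
+ *     blocksize 1:  b  D  a  C      (adjacent pairs sorted in place)
+ *     blocksize 2:  a  b  C  D      (pairs of blocks merged)
+ *
+ * Stability matters here: equal keys keep their insertion order, which
+ * apr_table_compress() relies on when merging or overwriting duplicates.
+ */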
+
+APR_DECLARE(void) apr_table_compress(apr_table_t *t, unsigned flags)
+{
+ apr_table_entry_t **sort_array;
+ apr_table_entry_t **sort_next;
+ apr_table_entry_t **sort_end;
+ apr_table_entry_t *table_next;
+ apr_table_entry_t **last;
+ int i;
+ int dups_found;
+
+ if (flags == APR_OVERLAP_TABLES_ADD) {
+ return;
+ }
+
+ if (t->a.nelts <= 1) {
+ return;
+ }
+
+ /* Copy pointers to all the table elements into an
+ * array and sort to allow for easy detection of
+ * duplicate keys
+ */
+ sort_array = (apr_table_entry_t **)
+ apr_palloc(t->a.pool, t->a.nelts * sizeof(apr_table_entry_t*));
+ sort_next = sort_array;
+ table_next = (apr_table_entry_t *)t->a.elts;
+ i = t->a.nelts;
+ do {
+ *sort_next++ = table_next++;
+ } while (--i);
+
+ /* Note: the merge is done with mergesort instead of quicksort
+ * because mergesort is a stable sort and runs in n*log(n)
+ * time regardless of its inputs (quicksort is quadratic in
+ * the worst case)
+ */
+ sort_array = table_mergesort(t->a.pool, sort_array, t->a.nelts);
+
+ /* Process any duplicate keys */
+ dups_found = 0;
+ sort_next = sort_array;
+ sort_end = sort_array + t->a.nelts;
+ last = sort_next++;
+ while (sort_next < sort_end) {
+ if (((*sort_next)->key_checksum == (*last)->key_checksum) &&
+ !strcasecmp((*sort_next)->key, (*last)->key)) {
+ apr_table_entry_t **dup_last = sort_next + 1;
+ dups_found = 1;
+ while ((dup_last < sort_end) &&
+ ((*dup_last)->key_checksum == (*last)->key_checksum) &&
+ !strcasecmp((*dup_last)->key, (*last)->key)) {
+ dup_last++;
+ }
+ dup_last--; /* Elements from last through dup_last, inclusive,
+ * all have the same key
+ */
+ if (flags == APR_OVERLAP_TABLES_MERGE) {
+ apr_size_t len = 0;
+ apr_table_entry_t **next = last;
+ char *new_val;
+ char *val_dst;
+ do {
+ len += strlen((*next)->val);
+ len += 2; /* for ", " or trailing null */
+ } while (++next <= dup_last);
+ new_val = (char *)apr_palloc(t->a.pool, len);
+ val_dst = new_val;
+ next = last;
+ for (;;) {
+ strcpy(val_dst, (*next)->val);
+ val_dst += strlen((*next)->val);
+ next++;
+ if (next > dup_last) {
+ *val_dst = 0;
+ break;
+ }
+ else {
+ *val_dst++ = ',';
+ *val_dst++ = ' ';
+ }
+ }
+ (*last)->val = new_val;
+ }
+ else { /* overwrite */
+ (*last)->val = (*dup_last)->val;
+ }
+ do {
+ (*sort_next)->key = NULL;
+ } while (++sort_next <= dup_last);
+ }
+ else {
+ last = sort_next++;
+ }
+ }
+
+ /* Shift elements to the left to fill holes left by removing duplicates */
+ if (dups_found) {
+ apr_table_entry_t *src = (apr_table_entry_t *)t->a.elts;
+ apr_table_entry_t *dst = (apr_table_entry_t *)t->a.elts;
+ apr_table_entry_t *last_elt = src + t->a.nelts;
+ do {
+ if (src->key) {
+ *dst++ = *src;
+ }
+ } while (++src < last_elt);
+ t->a.nelts -= (int)(last_elt - dst);
+ }
+
+ table_reindex(t);
+}
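+
+/* A minimal before/after sketch (not part of the original file; assumes
+ * an existing pool "p"):
+ *
+ *     apr_table_t *t = apr_table_make(p, 4);
+ *     apr_table_addn(t, "Accept", "text/html");
+ *     apr_table_addn(t, "Accept", "text/plain");
+ *     apr_table_compress(t, APR_OVERLAP_TABLES_MERGE);
+ *
+ * One "Accept" entry remains, with the value "text/html, text/plain".
+ * With APR_OVERLAP_TABLES_SET the last value ("text/plain") would win,
+ * and with APR_OVERLAP_TABLES_ADD the table is left untouched.
+ */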
+
+static void apr_table_cat(apr_table_t *t, const apr_table_t *s)
+{
+ const int n = t->a.nelts;
+ register int idx;
+
+ apr_array_cat(&t->a,&s->a);
+
+ if (n == 0) {
+ memcpy(t->index_first,s->index_first,sizeof(int) * TABLE_HASH_SIZE);
+ memcpy(t->index_last, s->index_last, sizeof(int) * TABLE_HASH_SIZE);
+ t->index_initialized = s->index_initialized;
+ return;
+ }
+
+ for (idx = 0; idx < TABLE_HASH_SIZE; ++idx) {
+ if (TABLE_INDEX_IS_INITIALIZED(s, idx)) {
+ t->index_last[idx] = s->index_last[idx] + n;
+ if (!TABLE_INDEX_IS_INITIALIZED(t, idx)) {
+ t->index_first[idx] = s->index_first[idx] + n;
+ }
+ }
+ }
+
+ t->index_initialized |= s->index_initialized;
+}
+
+APR_DECLARE(void) apr_table_overlap(apr_table_t *a, const apr_table_t *b,
+ unsigned flags)
+{
+ if (a->a.nelts + b->a.nelts == 0) {
+ return;
+ }
+
+#if APR_POOL_DEBUG
+ /* Since the keys and values are not copied, it's required that
+ * b->a.pool has a lifetime at least as long as a->a.pool. */
+ if (!apr_pool_is_ancestor(b->a.pool, a->a.pool)) {
+ fprintf(stderr, "apr_table_overlap: b's pool is not an ancestor of a's\n");
+ abort();
+ }
+#endif
+
+ apr_table_cat(a, b);
+
+ apr_table_compress(a, flags);
+}
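+
+/* A minimal sketch (assuming tables "a" and "b" satisfy the pool
+ * constraint above):
+ *
+ *     apr_table_overlap(a, b, APR_OVERLAP_TABLES_SET);
+ *
+ * Afterwards "a" holds the union of both tables; where a key appeared
+ * in both, b's value survives, because b's entries are appended last
+ * and the overwrite pass keeps the last duplicate's value.
+ */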
+
+static int table_getm_do(void *v, const char *key, const char *val)
+{
+ table_getm_t *state = (table_getm_t *) v;
+
+ if (!state->first) {
+        /* The most common case is a single header, and this is covered
+         * by a fast path that doesn't allocate any memory. For the
+         * second and subsequent headers, an array is created, and its
+         * elements are later concatenated to form the final value.
+         */
+ state->first = val;
+ }
+ else {
+ const char **elt;
+ if (!state->merged) {
+ state->merged = apr_array_make(state->p, 10, sizeof(const char *));
+ elt = apr_array_push(state->merged);
+ *elt = state->first;
+ }
+ elt = apr_array_push(state->merged);
+ *elt = val;
+ }
+ return 1;
+}
+
+APR_DECLARE(const char *) apr_table_getm(apr_pool_t *p, const apr_table_t *t,
+ const char *key)
+{
+ table_getm_t state;
+
+ state.p = p;
+ state.first = NULL;
+ state.merged = NULL;
+
+ apr_table_do(table_getm_do, &state, t, key, NULL);
+
+ if (!state.first) {
+ return NULL;
+ }
+ else if (!state.merged) {
+ return state.first;
+ }
+ else {
+ return apr_array_pstrcat(p, state.merged, ',');
+ }
+}
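+
+/* A minimal usage sketch (not part of the original file; assumes an
+ * existing pool "p"):
+ *
+ *     apr_table_t *t = apr_table_make(p, 2);
+ *     apr_table_addn(t, "Via", "1.1 alpha");
+ *     apr_table_addn(t, "Via", "1.1 beta");
+ *     const char *via = apr_table_getm(p, t, "Via");
+ *
+ * via is "1.1 alpha,1.1 beta": note the values are joined with a bare
+ * ',' here, unlike the ", " used by apr_table_merge().
+ */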