Diffstat (limited to 'ccan/ccan')
-rw-r--r--   ccan/ccan/build_assert/_info           49
-rw-r--r--   ccan/ccan/check_type/_info             33
l---------   ccan/ccan/compiler/LICENSE              1
-rw-r--r--   ccan/ccan/compiler/compiler.h         317
-rw-r--r--   ccan/ccan/container_of/_info           65
-rw-r--r--   ccan/ccan/endian/_info                 55
l---------   ccan/ccan/hash/LICENSE                  1
-rw-r--r--   ccan/ccan/hash/hash.c                 926
-rw-r--r--   ccan/ccan/hash/hash.h                 313
l---------   ccan/ccan/htable/LICENSE                1
-rw-r--r--   ccan/ccan/htable/htable.c             491
-rw-r--r--   ccan/ccan/htable/htable.h             290
-rw-r--r--   ccan/ccan/htable/htable_type.h        188
l---------   ccan/ccan/ilog/LICENSE                  1
-rw-r--r--   ccan/ccan/ilog/ilog.c                 141
-rw-r--r--   ccan/ccan/ilog/ilog.h                 154
l---------   ccan/ccan/likely/LICENSE                1
-rw-r--r--   ccan/ccan/likely/likely.c             136
-rw-r--r--   ccan/ccan/likely/likely.h             111
-rw-r--r--   ccan/ccan/list/_info                   72
l---------   ccan/ccan/short_types/LICENSE           1
-rw-r--r--   ccan/ccan/short_types/short_types.h    35
-rw-r--r--   ccan/ccan/str/_info                    52
-rw-r--r--   ccan/ccan/strset/strset.c             309
-rw-r--r--   ccan/ccan/strset/strset.h             167
l---------   ccan/ccan/typesafe_cb/LICENSE           1
-rw-r--r--   ccan/ccan/typesafe_cb/typesafe_cb.h   134
27 files changed, 3719 insertions, 326 deletions
diff --git a/ccan/ccan/build_assert/_info b/ccan/ccan/build_assert/_info
deleted file mode 100644
index 97ebe6c..0000000
--- a/ccan/ccan/build_assert/_info
+++ /dev/null
@@ -1,49 +0,0 @@
-#include "config.h"
-#include <stdio.h>
-#include <string.h>
-
-/**
- * build_assert - routines for build-time assertions
- *
- * This code provides routines which will cause compilation to fail should some
- * assertion be untrue: such failures are preferable to run-time assertions,
- * but much more limited since they can only depend on compile-time constants.
- *
- * These assertions are most useful when two parts of the code must be kept in
- * sync: it is better to avoid such cases if possible, but second best is to
- * detect invalid changes at build time.
- *
- * For example, a tricky piece of code might rely on a certain element being at
- * the start of the structure. To ensure that future changes don't break it,
- * you would catch such changes in your code like so:
- *
- * Example:
- * #include <stddef.h>
- * #include <ccan/build_assert/build_assert.h>
- *
- * struct foo {
- * char string[5];
- * int x;
- * };
- *
- * static char *foo_string(struct foo *foo)
- * {
- * // This trick requires that the string be first in the structure
- * BUILD_ASSERT(offsetof(struct foo, string) == 0);
- * return (char *)foo;
- * }
- *
- * License: CC0 (Public domain)
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- return 1;
-
- if (strcmp(argv[1], "depends") == 0)
- /* Nothing. */
- return 0;
-
- return 1;
-}
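For reference, the BUILD_ASSERT() used in the example above is typically implemented with a negative-array-size trick; a minimal sketch under that assumption (the MY_ names are illustrative, not this module's API):

    /* Fails to compile whenever cond is false: the array size goes negative. */
    #define MY_BUILD_ASSERT(cond) \
            do { (void)sizeof(char [1 - 2 * !(cond)]); } while (0)

    /* Expression form: evaluates to 0, or breaks the build if cond is false. */
    #define MY_BUILD_ASSERT_OR_ZERO(cond) \
            (sizeof(char [1 - 2 * !(cond)]) - 1)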
diff --git a/ccan/ccan/check_type/_info b/ccan/ccan/check_type/_info
deleted file mode 100644
index cc42673..0000000
--- a/ccan/ccan/check_type/_info
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "config.h"
-#include <stdio.h>
-#include <string.h>
-
-/**
- * check_type - routines for compile time type checking
- *
- * C has fairly weak typing: ints get automatically converted to longs, signed
- * to unsigned, etc. There are some cases where this is best avoided, and
- * these macros provide methods for evoking warnings (or build errors) when
- * a precise type isn't used.
- *
- * On compilers which don't support typeof() these routines are less effective,
- * since they have to use sizeof() which can only distinguish between types of
- * different size.
- *
- * License: CC0 (Public domain)
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- return 1;
-
- if (strcmp(argv[1], "depends") == 0) {
-#if !HAVE_TYPEOF
- printf("ccan/build_assert\n");
-#endif
- return 0;
- }
-
- return 1;
-}
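As background, with typeof() a precise type check is possible, while the fallback can only compare sizes, which is why the module depends on ccan/build_assert when HAVE_TYPEOF is not set. A rough sketch under those assumptions (MY_ names are illustrative):

    #include <ccan/build_assert/build_assert.h>

    #if HAVE_TYPEOF
    /* Comparing pointers to incompatible types draws a warning or error. */
    #define MY_CHECK_TYPE(expr, type) \
            ((typeof(expr) *)0 != (type *)0)
    #else
    /* Weaker fallback: only the size of the expression can be checked. */
    #define MY_CHECK_TYPE(expr, type) \
            BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
    #endif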
diff --git a/ccan/ccan/compiler/LICENSE b/ccan/ccan/compiler/LICENSE
new file mode 120000
index 0000000..b7951da
--- /dev/null
+++ b/ccan/ccan/compiler/LICENSE
@@ -0,0 +1 @@
+../../licenses/CC0 \ No newline at end of file
diff --git a/ccan/ccan/compiler/compiler.h b/ccan/ccan/compiler/compiler.h
new file mode 100644
index 0000000..562b29e
--- /dev/null
+++ b/ccan/ccan/compiler/compiler.h
@@ -0,0 +1,317 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_COMPILER_H
+#define CCAN_COMPILER_H
+#include "config.h"
+
+#ifndef COLD
+#if HAVE_ATTRIBUTE_COLD
+/**
+ * COLD - a function is unlikely to be called.
+ *
+ * Used to mark an unlikely code path and optimize appropriately.
+ * It is usually used on logging or error routines.
+ *
+ * Example:
+ * static void COLD moan(const char *reason)
+ * {
+ * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno));
+ * }
+ */
+#define COLD __attribute__((__cold__))
+#else
+#define COLD
+#endif
+#endif
+
+#ifndef NORETURN
+#if HAVE_ATTRIBUTE_NORETURN
+/**
+ * NORETURN - a function does not return
+ *
+ * Used to mark a function which exits; useful for suppressing warnings.
+ *
+ * Example:
+ * static void NORETURN fail(const char *reason)
+ * {
+ * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno));
+ * exit(1);
+ * }
+ */
+#define NORETURN __attribute__((__noreturn__))
+#else
+#define NORETURN
+#endif
+#endif
+
+#ifndef PRINTF_FMT
+#if HAVE_ATTRIBUTE_PRINTF
+/**
+ * PRINTF_FMT - a function takes printf-style arguments
+ * @nfmt: the 1-based number of the function's format argument.
+ * @narg: the 1-based number of the function's first variable argument.
+ *
+ * This allows the compiler to check your parameters as it does for printf().
+ *
+ * Example:
+ * void PRINTF_FMT(2,3) my_printf(const char *prefix, const char *fmt, ...);
+ */
+#define PRINTF_FMT(nfmt, narg) \
+ __attribute__((format(__printf__, nfmt, narg)))
+#else
+#define PRINTF_FMT(nfmt, narg)
+#endif
+#endif
+
+#ifndef CONST_FUNCTION
+#if HAVE_ATTRIBUTE_CONST
+/**
+ * CONST_FUNCTION - a function's return depends only on its argument
+ *
+ * This allows the compiler to assume that the function will return the exact
+ * same value for the exact same arguments. This implies that the function
+ * must not use global variables, or dereference pointer arguments.
+ */
+#define CONST_FUNCTION __attribute__((__const__))
+#else
+#define CONST_FUNCTION
+#endif
+
+#ifndef PURE_FUNCTION
+#if HAVE_ATTRIBUTE_PURE
+/**
+ * PURE_FUNCTION - a function is pure
+ *
+ * A pure function is one that has no side effects other than its return value
+ * and uses no inputs other than its arguments and global variables.
+ */
+#define PURE_FUNCTION __attribute__((__pure__))
+#else
+#define PURE_FUNCTION
+#endif
+#endif
+#endif
+
+#if HAVE_ATTRIBUTE_UNUSED
+#ifndef UNNEEDED
+/**
+ * UNNEEDED - a variable/function may not be needed
+ *
+ * This suppresses warnings about unused variables or functions, but tells
+ * the compiler that if it is unused it need not emit it into the object code.
+ *
+ * Example:
+ * // With some preprocessor options, this is unnecessary.
+ * static UNNEEDED int counter;
+ *
+ * // With some preprocessor options, this is unnecessary.
+ * static UNNEEDED void add_to_counter(int add)
+ * {
+ * counter += add;
+ * }
+ */
+#define UNNEEDED __attribute__((__unused__))
+#endif
+
+#ifndef NEEDED
+#if HAVE_ATTRIBUTE_USED
+/**
+ * NEEDED - a variable/function is needed
+ *
+ * This suppresses warnings about unused variables or functions, but tells
+ * the compiler that it must exist even if it seems unused.
+ *
+ * Example:
+ * // Even if this is unused, these are vital for debugging.
+ * static NEEDED int counter;
+ * static NEEDED void dump_counter(void)
+ * {
+ * printf("Counter is %i\n", counter);
+ * }
+ */
+#define NEEDED __attribute__((__used__))
+#else
+/* Before used, unused functions and vars were always emitted. */
+#define NEEDED __attribute__((__unused__))
+#endif
+#endif
+
+#ifndef UNUSED
+/**
+ * UNUSED - a parameter is unused
+ *
+ * Some compilers (eg. gcc with -W or -Wunused) warn about unused
+ * function parameters. This suppresses such warnings and indicates
+ * to the reader that it's deliberate.
+ *
+ * Example:
+ * // This is used as a callback, so needs to have this prototype.
+ * static int some_callback(void *unused UNUSED)
+ * {
+ * return 0;
+ * }
+ */
+#define UNUSED __attribute__((__unused__))
+#endif
+#else
+#ifndef UNNEEDED
+#define UNNEEDED
+#endif
+#ifndef NEEDED
+#define NEEDED
+#endif
+#ifndef UNUSED
+#define UNUSED
+#endif
+#endif
+
+#ifndef IS_COMPILE_CONSTANT
+#if HAVE_BUILTIN_CONSTANT_P
+/**
+ * IS_COMPILE_CONSTANT - does the compiler know the value of this expression?
+ * @expr: the expression to evaluate
+ *
+ * When an expression manipulation is complicated, it is usually better to
+ * implement it in a function. However, if the expression being manipulated is
+ * known at compile time, it is better to have the compiler see the entire
+ * expression so it can simply substitute the result.
+ *
+ * This can be done using the IS_COMPILE_CONSTANT() macro.
+ *
+ * Example:
+ * enum greek { ALPHA, BETA, GAMMA, DELTA, EPSILON };
+ *
+ * // Out-of-line version.
+ * const char *greek_name(enum greek greek);
+ *
+ * // Inline version.
+ * static inline const char *_greek_name(enum greek greek)
+ * {
+ * switch (greek) {
+ * case ALPHA: return "alpha";
+ * case BETA: return "beta";
+ * case GAMMA: return "gamma";
+ * case DELTA: return "delta";
+ * case EPSILON: return "epsilon";
+ * default: return "**INVALID**";
+ * }
+ * }
+ *
+ * // Use inline if compiler knows answer. Otherwise call function
+ * // to avoid copies of the same code everywhere.
+ * #define greek_name(g) \
+ * (IS_COMPILE_CONSTANT(g) ? _greek_name(g) : greek_name(g))
+ */
+#define IS_COMPILE_CONSTANT(expr) __builtin_constant_p(expr)
+#else
+/* If we don't know, assume it's not. */
+#define IS_COMPILE_CONSTANT(expr) 0
+#endif
+#endif
+
+#ifndef WARN_UNUSED_RESULT
+#if HAVE_WARN_UNUSED_RESULT
+/**
+ * WARN_UNUSED_RESULT - warn if a function return value is unused.
+ *
+ * Used to mark a function where it is extremely unlikely that the caller
+ * can ignore the result, eg realloc().
+ *
+ * Example:
+ * // buf param may be freed by this; need return value!
+ * static char *WARN_UNUSED_RESULT enlarge(char *buf, unsigned *size)
+ * {
+ * return realloc(buf, (*size) *= 2);
+ * }
+ */
+#define WARN_UNUSED_RESULT __attribute__((__warn_unused_result__))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+#endif
+
+
+#if HAVE_ATTRIBUTE_DEPRECATED
+/**
+ * WARN_DEPRECATED - warn that a function/type/variable is deprecated when used.
+ *
+ * Used to mark a function, type or variable that should not be used.
+ *
+ * Example:
+ * WARN_DEPRECATED char *oldfunc(char *buf);
+ */
+#define WARN_DEPRECATED __attribute__((__deprecated__))
+#else
+#define WARN_DEPRECATED
+#endif
+
+
+#if HAVE_ATTRIBUTE_NONNULL
+/**
+ * NO_NULL_ARGS - specify that no arguments to this function can be NULL.
+ *
+ * The compiler will warn if any pointer args are NULL.
+ *
+ * Example:
+ * NO_NULL_ARGS char *my_copy(char *buf);
+ */
+#define NO_NULL_ARGS __attribute__((__nonnull__))
+
+/**
+ * NON_NULL_ARGS - specify that some arguments to this function can't be NULL.
+ * @...: 1-based argument numbers for which args can't be NULL.
+ *
+ * The compiler will warn if any of the specified pointer args are NULL.
+ *
+ * Example:
+ * char *my_copy2(char *buf, char *maybenull) NON_NULL_ARGS(1);
+ */
+#define NON_NULL_ARGS(...) __attribute__((__nonnull__(__VA_ARGS__)))
+#else
+#define NO_NULL_ARGS
+#define NON_NULL_ARGS(...)
+#endif
+
+#if HAVE_ATTRIBUTE_RETURNS_NONNULL
+/**
+ * RETURNS_NONNULL - specify that this function cannot return NULL.
+ *
+ * Mainly an optimization opportunity, but can also suppress warnings.
+ *
+ * Example:
+ * RETURNS_NONNULL char *my_copy(char *buf);
+ */
+#define RETURNS_NONNULL __attribute__((__returns_nonnull__))
+#else
+#define RETURNS_NONNULL
+#endif
+
+#if HAVE_ATTRIBUTE_SENTINEL
+/**
+ * LAST_ARG_NULL - specify that the last argument of a variadic function must be NULL.
+ *
+ * The compiler will warn if the last argument isn't NULL.
+ *
+ * Example:
+ * char *join_string(char *buf, ...) LAST_ARG_NULL;
+ */
+#define LAST_ARG_NULL __attribute__((__sentinel__))
+#else
+#define LAST_ARG_NULL
+#endif
+
+#if HAVE_BUILTIN_CPU_SUPPORTS
+/**
+ * cpu_supports - test if current CPU supports the named feature.
+ *
+ * This takes a literal string, and currently only works on glibc platforms.
+ *
+ * Example:
+ * if (cpu_supports("mmx"))
+ * printf("MMX support engaged!\n");
+ */
+#define cpu_supports(x) __builtin_cpu_supports(x)
+#else
+#define cpu_supports(x) 0
+#endif /* HAVE_BUILTIN_CPU_SUPPORTS */
+
+#endif /* CCAN_COMPILER_H */
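A small usage sketch for the wrappers above (my own example, not part of the header), showing how several attributes compose on one declaration and degrade to no-ops on compilers that lack them:

    #include <ccan/compiler/compiler.h>
    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Cold path, never returns, and printf-checked arguments. */
    static void NORETURN COLD PRINTF_FMT(1, 2) die(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
            exit(1);
    }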
diff --git a/ccan/ccan/container_of/_info b/ccan/ccan/container_of/_info
deleted file mode 100644
index b116052..0000000
--- a/ccan/ccan/container_of/_info
+++ /dev/null
@@ -1,65 +0,0 @@
-#include "config.h"
-#include <stdio.h>
-#include <string.h>
-
-/**
- * container_of - routine for upcasting
- *
- * It is often convenient to create code where the caller registers a pointer
- * to a generic structure and a callback. The callback might know that the
- * pointer points to within a larger structure, and container_of gives a
- * convenient and fairly type-safe way of returning to the enclosing structure.
- *
- * This idiom is an alternative to providing a void * pointer for every
- * callback.
- *
- * Example:
- * #include <stdio.h>
- * #include <ccan/container_of/container_of.h>
- *
- * struct timer {
- * void *members;
- * };
- *
- * struct info {
- * int my_stuff;
- * struct timer timer;
- * };
- *
- * static void my_timer_callback(struct timer *timer)
- * {
- * struct info *info = container_of(timer, struct info, timer);
- * printf("my_stuff is %u\n", info->my_stuff);
- * }
- *
- * static void register_timer(struct timer *timer)
- * {
- * (void)timer;
- * (void)my_timer_callback;
- * //...
- * }
- *
- * int main(void)
- * {
- * struct info info = { .my_stuff = 1 };
- *
- * register_timer(&info.timer);
- * // ...
- * return 0;
- * }
- *
- * License: CC0 (Public domain)
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- return 1;
-
- if (strcmp(argv[1], "depends") == 0) {
- printf("ccan/check_type\n");
- return 0;
- }
-
- return 1;
-}
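The upcast described above reduces to a single offset subtraction; a minimal sketch (hypothetical MY_ name, omitting the extra type checking the real macro adds via ccan/check_type):

    #include <stddef.h>     /* offsetof */

    #define MY_CONTAINER_OF(member_ptr, containing_type, member) \
            ((containing_type *)((char *)(member_ptr) \
                                 - offsetof(containing_type, member)))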
diff --git a/ccan/ccan/endian/_info b/ccan/ccan/endian/_info
deleted file mode 100644
index efe5a8b..0000000
--- a/ccan/ccan/endian/_info
+++ /dev/null
@@ -1,55 +0,0 @@
-#include "config.h"
-#include <stdio.h>
-#include <string.h>
-
-/**
- * endian - endian conversion macros for simple types
- *
- * Portable protocols (such as on-disk formats, or network protocols)
- * are often defined to be a particular endian: little-endian (least
- * significant bytes first) or big-endian (most significant bytes
- * first).
- *
- * Similarly, some CPUs lay out values in memory in little-endian
- * order (most commonly, Intel's 8086 and derivatives), or big-endian
- * order (almost everyone else).
- *
- * This module provides conversion routines, inspired by the linux kernel.
- * It also provides leint32_t, beint32_t etc typedefs, which are annotated for
- * the sparse checker.
- *
- * Example:
- * #include <stdio.h>
- * #include <err.h>
- * #include <ccan/endian/endian.h>
- *
- * //
- * int main(int argc, char *argv[])
- * {
- * uint32_t value;
- *
- * if (argc != 2)
- * errx(1, "Usage: %s <value>", argv[0]);
- *
- * value = atoi(argv[1]);
- * printf("native: %08x\n", value);
- * printf("little-endian: %08x\n", cpu_to_le32(value));
- * printf("big-endian: %08x\n", cpu_to_be32(value));
- * printf("byte-reversed: %08x\n", bswap_32(value));
- * exit(0);
- * }
- *
- * License: CC0 (Public domain)
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- return 1;
-
- if (strcmp(argv[1], "depends") == 0)
- /* Nothing */
- return 0;
-
- return 1;
-}
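The conversions used in the example above amount to a byte reversal applied only when the host byte order differs from the target order; a sketch with hypothetical my_ names, assuming the HAVE_BIG_ENDIAN config symbol:

    #include <stdint.h>

    static inline uint32_t my_bswap_32(uint32_t v)
    {
            return ((v & 0x000000ffU) << 24) |
                   ((v & 0x0000ff00U) <<  8) |
                   ((v & 0x00ff0000U) >>  8) |
                   ((v & 0xff000000U) >> 24);
    }

    /* Little-endian hosts already store values in little-endian wire order. */
    #if HAVE_BIG_ENDIAN
    #define my_cpu_to_le32(v) my_bswap_32(v)
    #else
    #define my_cpu_to_le32(v) (v)
    #endif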
diff --git a/ccan/ccan/hash/LICENSE b/ccan/ccan/hash/LICENSE
new file mode 120000
index 0000000..b7951da
--- /dev/null
+++ b/ccan/ccan/hash/LICENSE
@@ -0,0 +1 @@
+../../licenses/CC0 \ No newline at end of file
diff --git a/ccan/ccan/hash/hash.c b/ccan/ccan/hash/hash.c
new file mode 100644
index 0000000..88d88fc
--- /dev/null
+++ b/ccan/ccan/hash/hash.c
@@ -0,0 +1,926 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hash_word(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+-------------------------------------------------------------------------------
+*/
+//#define SELF_TEST 1
+
+#if 0
+#include <stdio.h> /* defines printf for tests */
+#include <time.h> /* defines time_t for timings in the test */
+#include <stdint.h> /* defines uint32_t etc */
+#include <sys/param.h> /* attempt to define endianness */
+
+#ifdef linux
+# include <endian.h> /* attempt to define endianness */
+#endif
+
+/*
+ * My best guess at if you are big-endian or little-endian. This may
+ * need adjustment.
+ */
+#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
+ __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(i386) || defined(__i386__) || defined(__i486__) || \
+ defined(__i586__) || defined(__i686__) || defined(__x86_64) || \
+ defined(vax) || defined(MIPSEL))
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
+ __BYTE_ORDER == __BIG_ENDIAN) || \
+ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#else
+# error Unknown endian
+#endif
+#endif /* old hash.c headers. */
+
+#include "hash.h"
+
+#if HAVE_LITTLE_ENDIAN
+#define HASH_LITTLE_ENDIAN 1
+#define HASH_BIG_ENDIAN 0
+#elif HAVE_BIG_ENDIAN
+#define HASH_LITTLE_ENDIAN 0
+#define HASH_BIG_ENDIAN 1
+#else
+#error Unknown endian
+#endif
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+/*
+--------------------------------------------------------------------
+ This works on all machines. To be useful, it requires
+ -- that the key be an array of uint32_t's, and
+ -- that the length be the number of uint32_t's in the key
+
+ The function hash_word() is identical to hashlittle() on little-endian
+ machines, and identical to hashbig() on big-endian machines,
+ except that the length has to be measured in uint32_ts rather than in
+ bytes. hashlittle() is more complicated than hash_word() only because
+ hashlittle() has to dance around fitting the key bytes into registers.
+--------------------------------------------------------------------
+*/
+uint32_t hash_u32(
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ return c;
+}
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+ val2 : IN: can be any 4-byte value OUT: second 32 bit hash.
+Returns a 32-bit value. Every bit of the key affects every bit of
+the return value. Two keys differing by one or two bits will have
+totally different hash values. Note that the return value is better
+mixed than val2, so use that first.
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
+static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+ const uint8_t *k8;
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But VALGRIND will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ *
+ * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR.
+ */
+#if 0
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+
+#endif /* !valgrind */
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ *val2 = b;
+ return c;
+}
+
+/*
+ * hashbig():
+ * This is the same as hash_word() on big-endian machines. It is different
+ * from hashlittle() on all machines. hashbig() takes advantage of
+ * big-endian byte ordering.
+ */
+static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
+{
+ uint32_t a,b,c;
+ union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2;
+
+ u.ptr = key;
+ if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+ const uint8_t *k8;
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]<<8" actually reads beyond the end of the string, but
+ * then shifts out the part it's not allowed to read. Because the
+ * string is aligned, the illegal read is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But VALGRIND will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ *
+ * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR.
+ */
+#if 0
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff00; break;
+ case 2 : a+=k[0]&0xffff0000; break;
+ case 1 : a+=k[0]&0xff000000; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ k8 = (const uint8_t *)k;
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<8; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */
+ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */
+ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */
+ case 1 : a+=((uint32_t)k8[0])<<24; break;
+ case 0 : return c;
+ }
+
+#endif /* !VALGRIND */
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += ((uint32_t)k[0])<<24;
+ a += ((uint32_t)k[1])<<16;
+ a += ((uint32_t)k[2])<<8;
+ a += ((uint32_t)k[3]);
+ b += ((uint32_t)k[4])<<24;
+ b += ((uint32_t)k[5])<<16;
+ b += ((uint32_t)k[6])<<8;
+ b += ((uint32_t)k[7]);
+ c += ((uint32_t)k[8])<<24;
+ c += ((uint32_t)k[9])<<16;
+ c += ((uint32_t)k[10])<<8;
+ c += ((uint32_t)k[11]);
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[11];
+ case 11: c+=((uint32_t)k[10])<<8;
+ case 10: c+=((uint32_t)k[9])<<16;
+ case 9 : c+=((uint32_t)k[8])<<24;
+ case 8 : b+=k[7];
+ case 7 : b+=((uint32_t)k[6])<<8;
+ case 6 : b+=((uint32_t)k[5])<<16;
+ case 5 : b+=((uint32_t)k[4])<<24;
+ case 4 : a+=k[3];
+ case 3 : a+=((uint32_t)k[2])<<8;
+ case 2 : a+=((uint32_t)k[1])<<16;
+ case 1 : a+=((uint32_t)k[0])<<24;
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ *val2 = b;
+ return c;
+}
+
+/* I basically use hashlittle here, but use native endian within each
+ * element. This delivers least-surprise: hash such as "int arr[] = {
+ * 1, 2 }; hash_stable(arr, 2, 0);" will be the same on big and little
+ * endian machines, even though a bytewise hash wouldn't be. */
+uint64_t hash64_stable_64(const void *key, size_t n, uint64_t base)
+{
+ const uint64_t *k = key;
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)n*8) + (base >> 32) + base;
+
+ while (n > 3) {
+ a += (uint32_t)k[0];
+ b += (uint32_t)(k[0] >> 32);
+ c += (uint32_t)k[1];
+ mix(a,b,c);
+ a += (uint32_t)(k[1] >> 32);
+ b += (uint32_t)k[2];
+ c += (uint32_t)(k[2] >> 32);
+ mix(a,b,c);
+ n -= 3;
+ k += 3;
+ }
+ switch (n) {
+ case 2:
+ a += (uint32_t)k[0];
+ b += (uint32_t)(k[0] >> 32);
+ c += (uint32_t)k[1];
+ mix(a,b,c);
+ a += (uint32_t)(k[1] >> 32);
+ break;
+ case 1:
+ a += (uint32_t)k[0];
+ b += (uint32_t)(k[0] >> 32);
+ break;
+ case 0:
+ return c;
+ }
+ final(a,b,c);
+ return ((uint64_t)b << 32) | c;
+}
+
+uint64_t hash64_stable_32(const void *key, size_t n, uint64_t base)
+{
+ const uint32_t *k = key;
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)n*4) + (base >> 32) + base;
+
+ while (n > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+
+ n -= 3;
+ k += 3;
+ }
+ switch (n) {
+ case 2:
+ b += (uint32_t)k[1];
+ case 1:
+ a += (uint32_t)k[0];
+ break;
+ case 0:
+ return c;
+ }
+ final(a,b,c);
+ return ((uint64_t)b << 32) | c;
+}
+
+uint64_t hash64_stable_16(const void *key, size_t n, uint64_t base)
+{
+ const uint16_t *k = key;
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)n*2) + (base >> 32) + base;
+
+ while (n > 6) {
+ a += (uint32_t)k[0] + ((uint32_t)k[1] << 16);
+ b += (uint32_t)k[2] + ((uint32_t)k[3] << 16);
+ c += (uint32_t)k[4] + ((uint32_t)k[5] << 16);
+ mix(a,b,c);
+
+ n -= 6;
+ k += 6;
+ }
+
+ switch (n) {
+ case 5:
+ c += (uint32_t)k[4];
+ case 4:
+ b += ((uint32_t)k[3] << 16);
+ case 3:
+ b += (uint32_t)k[2];
+ case 2:
+ a += ((uint32_t)k[1] << 16);
+ case 1:
+ a += (uint32_t)k[0];
+ break;
+ case 0:
+ return c;
+ }
+ final(a,b,c);
+ return ((uint64_t)b << 32) | c;
+}
+
+uint64_t hash64_stable_8(const void *key, size_t n, uint64_t base)
+{
+ uint32_t b32 = base + (base >> 32);
+ uint32_t lower = hashlittle(key, n, &b32);
+
+ return ((uint64_t)b32 << 32) | lower;
+}
+
+uint32_t hash_any(const void *key, size_t length, uint32_t base)
+{
+ if (HASH_BIG_ENDIAN)
+ return hashbig(key, length, &base);
+ else
+ return hashlittle(key, length, &base);
+}
+
+uint32_t hash_stable_64(const void *key, size_t n, uint32_t base)
+{
+ return hash64_stable_64(key, n, base);
+}
+
+uint32_t hash_stable_32(const void *key, size_t n, uint32_t base)
+{
+ return hash64_stable_32(key, n, base);
+}
+
+uint32_t hash_stable_16(const void *key, size_t n, uint32_t base)
+{
+ return hash64_stable_16(key, n, base);
+}
+
+uint32_t hash_stable_8(const void *key, size_t n, uint32_t base)
+{
+ return hashlittle(key, n, &base);
+}
+
+/* Jenkins' lookup8 is a 64 bit hash, but he says it's obsolete. Use
+ * the plain one and recombine into 64 bits. */
+uint64_t hash64_any(const void *key, size_t length, uint64_t base)
+{
+ uint32_t b32 = base + (base >> 32);
+ uint32_t lower;
+
+ if (HASH_BIG_ENDIAN)
+ lower = hashbig(key, length, &b32);
+ else
+ lower = hashlittle(key, length, &b32);
+
+ return ((uint64_t)b32 << 32) | lower;
+}
+
+#ifdef SELF_TEST
+
+/* used for timings */
+void driver1()
+{
+ uint8_t buf[256];
+ uint32_t i;
+ uint32_t h=0;
+ time_t a,z;
+
+ time(&a);
+ for (i=0; i<256; ++i) buf[i] = 'x';
+ for (i=0; i<1; ++i)
+ {
+ h = hashlittle(&buf[0],1,h);
+ }
+ time(&z);
+ if (z-a > 0) printf("time %d %.8x\n", (int)(z-a), h);
+}
+
+/* check that every input bit changes every output bit half the time */
+#define HASHSTATE 1
+#define HASHLEN 1
+#define MAXPAIR 60
+#define MAXLEN 70
+void driver2()
+{
+ uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1];
+ uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z;
+ uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE];
+ uint32_t x[HASHSTATE],y[HASHSTATE];
+ uint32_t hlen;
+
+ printf("No more than %d trials should ever be needed \n",MAXPAIR/2);
+ for (hlen=0; hlen < MAXLEN; ++hlen)
+ {
+ z=0;
+ for (i=0; i<hlen; ++i) /*----------------------- for each input byte, */
+ {
+ for (j=0; j<8; ++j) /*------------------------ for each input bit, */
+ {
+ for (m=1; m<8; ++m) /*------------ for several possible initvals, */
+ {
+ for (l=0; l<HASHSTATE; ++l)
+ e[l]=f[l]=g[l]=h[l]=x[l]=y[l]=~((uint32_t)0);
+
+ /*---- check that every output bit is affected by that input bit */
+ for (k=0; k<MAXPAIR; k+=2)
+ {
+ uint32_t finished=1;
+ /* keys have one bit different */
+ for (l=0; l<hlen+1; ++l) {a[l] = b[l] = (uint8_t)0;}
+ /* have a and b be two keys differing in only one bit */
+ a[i] ^= (k<<j);
+ a[i] ^= (k>>(8-j));
+ c[0] = hashlittle(a, hlen, m);
+ b[i] ^= ((k+1)<<j);
+ b[i] ^= ((k+1)>>(8-j));
+ d[0] = hashlittle(b, hlen, m);
+ /* check every bit is 1, 0, set, and not set at least once */
+ for (l=0; l<HASHSTATE; ++l)
+ {
+ e[l] &= (c[l]^d[l]);
+ f[l] &= ~(c[l]^d[l]);
+ g[l] &= c[l];
+ h[l] &= ~c[l];
+ x[l] &= d[l];
+ y[l] &= ~d[l];
+ if (e[l]|f[l]|g[l]|h[l]|x[l]|y[l]) finished=0;
+ }
+ if (finished) break;
+ }
+ if (k>z) z=k;
+ if (k==MAXPAIR)
+ {
+ printf("Some bit didn't change: ");
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x ",
+ e[0],f[0],g[0],h[0],x[0],y[0]);
+ printf("i %d j %d m %d len %d\n", i, j, m, hlen);
+ }
+ if (z==MAXPAIR) goto done;
+ }
+ }
+ }
+ done:
+ if (z < MAXPAIR)
+ {
+ printf("Mix success %2d bytes %2d initvals ",i,m);
+ printf("required %d trials\n", z/2);
+ }
+ }
+ printf("\n");
+}
+
+/* Check for reading beyond the end of the buffer and alignment problems */
+void driver3()
+{
+ uint8_t buf[MAXLEN+20], *b;
+ uint32_t len;
+ uint8_t q[] = "This is the time for all good men to come to the aid of their country...";
+ uint32_t h;
+ uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country...";
+ uint32_t i;
+ uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of their country...";
+ uint32_t j;
+ uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country...";
+ uint32_t ref,x,y;
+ uint8_t *p;
+
+ printf("Endianness. These lines should all be the same (for values filled in):\n");
+ printf("%.8x %.8x %.8x\n",
+ hash_word((const uint32_t *)q, (sizeof(q)-1)/4, 13),
+ hash_word((const uint32_t *)q, (sizeof(q)-5)/4, 13),
+ hash_word((const uint32_t *)q, (sizeof(q)-9)/4, 13));
+ p = q;
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qq[1];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqq[2];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqqq[3];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ printf("\n");
+
+ /* check that hashlittle2 and hashlittle produce the same results */
+ i=47; j=0;
+ hashlittle2(q, sizeof(q), &i, &j);
+ if (hashlittle(q, sizeof(q), 47) != i)
+ printf("hashlittle2 and hashlittle mismatch\n");
+
+ /* check that hash_word2 and hash_word produce the same results */
+ len = 0xdeadbeef;
+ i=47, j=0;
+ hash_word2(&len, 1, &i, &j);
+ if (hash_word(&len, 1, 47) != i)
+ printf("hash_word2 and hash_word mismatch %x %x\n",
+ i, hash_word(&len, 1, 47));
+
+ /* check hashlittle doesn't read before or after the ends of the string */
+ for (h=0, b=buf+1; h<8; ++h, ++b)
+ {
+ for (i=0; i<MAXLEN; ++i)
+ {
+ len = i;
+ for (j=0; j<i; ++j) *(b+j)=0;
+
+ /* these should all be equal */
+ ref = hashlittle(b, len, (uint32_t)1);
+ *(b+i)=(uint8_t)~0;
+ *(b-1)=(uint8_t)~0;
+ x = hashlittle(b, len, (uint32_t)1);
+ y = hashlittle(b, len, (uint32_t)1);
+ if ((ref != x) || (ref != y))
+ {
+ printf("alignment error: %.8x %.8x %.8x %d %d\n",ref,x,y,
+ h, i);
+ }
+ }
+ }
+}
+
+/* check for problems with nulls */
+ void driver4()
+{
+ uint8_t buf[1];
+ uint32_t h,i,state[HASHSTATE];
+
+
+ buf[0] = ~0;
+ for (i=0; i<HASHSTATE; ++i) state[i] = 1;
+ printf("These should all be different\n");
+ for (i=0, h=0; i<8; ++i)
+ {
+ h = hashlittle(buf, 0, h);
+ printf("%2ld 0-byte strings, hash is %.8x\n", i, h);
+ }
+}
+
+
+int main()
+{
+ driver1(); /* test that the key is hashed: used for timings */
+ driver2(); /* test that whole key is hashed thoroughly */
+ driver3(); /* test that nothing but the key is hashed */
+ driver4(); /* test hashing multiple buffers (all buffers are null) */
+ return 1;
+}
+
+#endif /* SELF_TEST */
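The fixed-length recipe quoted in the file's opening comment ("If you want to find a hash of, say, exactly 7 integers...") can be written out as a small function, assuming the mix()/final() macros above are in scope:

    static uint32_t hash_7_ints(const uint32_t i[7])
    {
            uint32_t a = i[0], b = i[1], c = i[2];

            mix(a, b, c);
            a += i[3]; b += i[4]; c += i[5];
            mix(a, b, c);
            a += i[6];
            final(a, b, c);
            return c;       /* "then use c as the hash value" */
    }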
diff --git a/ccan/ccan/hash/hash.h b/ccan/ccan/hash/hash.h
new file mode 100644
index 0000000..2170684
--- /dev/null
+++ b/ccan/ccan/hash/hash.h
@@ -0,0 +1,313 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_HASH_H
+#define CCAN_HASH_H
+#include "config.h"
+#include <stdint.h>
+#include <stdlib.h>
+#include <ccan/build_assert/build_assert.h>
+
+/* Stolen mostly from: lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * http://burtleburtle.net/bob/c/lookup3.c
+ */
+
+/**
+ * hash - fast hash of an array for internal use
+ * @p: the array or pointer to first element
+ * @num: the number of elements to hash
+ * @base: the base number to roll into the hash (usually 0)
+ *
+ * The memory region pointed to by p is combined with the base to form
+ * a 32-bit hash.
+ *
+ * This hash will have different results on different machines, so is
+ * only useful for internal hashes (ie. not hashes sent across the
+ * network or saved to disk).
+ *
+ * It may also change with future versions: it could even detect at runtime
+ * what the fastest hash to use is.
+ *
+ * See also: hash64, hash_stable.
+ *
+ * Example:
+ * #include <ccan/hash/hash.h>
+ * #include <err.h>
+ * #include <stdio.h>
+ * #include <string.h>
+ *
+ * // Simple demonstration: identical strings will have the same hash, but
+ * // two different strings will probably not.
+ * int main(int argc, char *argv[])
+ * {
+ * uint32_t hash1, hash2;
+ *
+ * if (argc != 3)
+ * err(1, "Usage: %s <string1> <string2>", argv[0]);
+ *
+ * hash1 = hash(argv[1], strlen(argv[1]), 0);
+ * hash2 = hash(argv[2], strlen(argv[2]), 0);
+ * printf("Hash is %s\n", hash1 == hash2 ? "same" : "different");
+ * return 0;
+ * }
+ */
+#define hash(p, num, base) hash_any((p), (num)*sizeof(*(p)), (base))
+
+/**
+ * hash_stable - hash of an array for external use
+ * @p: the array or pointer to first element
+ * @num: the number of elements to hash
+ * @base: the base number to roll into the hash (usually 0)
+ *
+ * The array of simple integer types pointed to by p is combined with
+ * the base to form a 32-bit hash.
+ *
+ * This hash will have the same results on different machines, so can
+ * be used for external hashes (ie. hashes sent across the network or
+ * saved to disk). The results will not change in future versions of
+ * this module.
+ *
+ * Note that it is only legal to hand an array of simple integer types
+ * to this hash (ie. char, uint16_t, int64_t, etc). In these cases,
+ * the same values will have the same hash result, even though the
+ * memory representations of integers depend on the machine
+ * endianness.
+ *
+ * See also:
+ * hash64_stable
+ *
+ * Example:
+ * #include <ccan/hash/hash.h>
+ * #include <err.h>
+ * #include <stdio.h>
+ * #include <string.h>
+ *
+ * int main(int argc, char *argv[])
+ * {
+ * if (argc != 2)
+ * err(1, "Usage: %s <string-to-hash>", argv[0]);
+ *
+ * printf("Hash stable result is %u\n",
+ * hash_stable(argv[1], strlen(argv[1]), 0));
+ * return 0;
+ * }
+ */
+#define hash_stable(p, num, base) \
+ (BUILD_ASSERT_OR_ZERO(sizeof(*(p)) == 8 || sizeof(*(p)) == 4 \
+ || sizeof(*(p)) == 2 || sizeof(*(p)) == 1) + \
+ sizeof(*(p)) == 8 ? hash_stable_64((p), (num), (base)) \
+ : sizeof(*(p)) == 4 ? hash_stable_32((p), (num), (base)) \
+ : sizeof(*(p)) == 2 ? hash_stable_16((p), (num), (base)) \
+ : hash_stable_8((p), (num), (base)))
+
+/**
+ * hash_u32 - fast hash an array of 32-bit values for internal use
+ * @key: the array of uint32_t
+ * @num: the number of elements to hash
+ * @base: the base number to roll into the hash (usually 0)
+ *
+ * The array of uint32_t pointed to by @key is combined with the base
+ * to form a 32-bit hash. This is 2-3 times faster than hash() on small
+ * arrays, but the advantage vanishes over large hashes.
+ *
+ * This hash will have different results on different machines, so is
+ * only useful for internal hashes (ie. not hashes sent across the
+ * network or saved to disk).
+ */
+uint32_t hash_u32(const uint32_t *key, size_t num, uint32_t base);
+
+/**
+ * hash_string - very fast hash of an ascii string
+ * @str: the nul-terminated string
+ *
+ * The string is hashed, using a hash function optimized for ASCII and
+ * similar strings. It's weaker than the other hash functions.
+ *
+ * This hash may have different results on different machines, so is
+ * only useful for internal hashes (ie. not hashes sent across the
+ * network or saved to disk). The results will be different from the
+ * other hash functions in this module, too.
+ */
+static inline uint32_t hash_string(const char *string)
+{
+ /* This is Karl Nelson <kenelson@ece.ucdavis.edu>'s X31 hash.
+ * It's a little faster than the (much better) lookup3 hash(): 56ns vs
+ * 84ns on my 2GHz Intel Core Duo 2 laptop for a 10 char string. */
+ uint32_t ret;
+
+ for (ret = 0; *string; string++)
+ ret = (ret << 5) - ret + *string;
+
+ return ret;
+}
+
+/**
+ * hash64 - fast 64-bit hash of an array for internal use
+ * @p: the array or pointer to first element
+ * @num: the number of elements to hash
+ * @base: the 64-bit base number to roll into the hash (usually 0)
+ *
+ * The memory region pointed to by p is combined with the base to form
+ * a 64-bit hash.
+ *
+ * This hash will have different results on different machines, so is
+ * only useful for internal hashes (ie. not hashes sent across the
+ * network or saved to disk).
+ *
+ * It may also change with future versions: it could even detect at runtime
+ * what the fastest hash to use is.
+ *
+ * See also: hash.
+ *
+ * Example:
+ * #include <ccan/hash/hash.h>
+ * #include <err.h>
+ * #include <stdio.h>
+ * #include <string.h>
+ *
+ * // Simple demonstration: identical strings will have the same hash, but
+ * // two different strings will probably not.
+ * int main(int argc, char *argv[])
+ * {
+ * uint64_t hash1, hash2;
+ *
+ * if (argc != 3)
+ * err(1, "Usage: %s <string1> <string2>", argv[0]);
+ *
+ * hash1 = hash64(argv[1], strlen(argv[1]), 0);
+ * hash2 = hash64(argv[2], strlen(argv[2]), 0);
+ * printf("Hash is %s\n", hash1 == hash2 ? "same" : "different");
+ * return 0;
+ * }
+ */
+#define hash64(p, num, base) hash64_any((p), (num)*sizeof(*(p)), (base))
+
+/**
+ * hash64_stable - 64 bit hash of an array for external use
+ * @p: the array or pointer to first element
+ * @num: the number of elements to hash
+ * @base: the base number to roll into the hash (usually 0)
+ *
+ * The array of simple integer types pointed to by p is combined with
+ * the base to form a 64-bit hash.
+ *
+ * This hash will have the same results on different machines, so can
+ * be used for external hashes (ie. hashes sent across the network or
+ * saved to disk). The results will not change in future versions of
+ * this module.
+ *
+ * Note that it is only legal to hand an array of simple integer types
+ * to this hash (ie. char, uint16_t, int64_t, etc). In these cases,
+ * the same values will have the same hash result, even though the
+ * memory representations of integers depend on the machine
+ * endianness.
+ *
+ * See also:
+ * hash_stable
+ *
+ * Example:
+ * #include <ccan/hash/hash.h>
+ * #include <err.h>
+ * #include <stdio.h>
+ * #include <string.h>
+ *
+ * int main(int argc, char *argv[])
+ * {
+ * if (argc != 2)
+ * err(1, "Usage: %s <string-to-hash>", argv[0]);
+ *
+ * printf("Hash stable result is %llu\n",
+ * (long long)hash64_stable(argv[1], strlen(argv[1]), 0));
+ * return 0;
+ * }
+ */
+#define hash64_stable(p, num, base) \
+ (BUILD_ASSERT_OR_ZERO(sizeof(*(p)) == 8 || sizeof(*(p)) == 4 \
+ || sizeof(*(p)) == 2 || sizeof(*(p)) == 1) + \
+ sizeof(*(p)) == 8 ? hash64_stable_64((p), (num), (base)) \
+ : sizeof(*(p)) == 4 ? hash64_stable_32((p), (num), (base)) \
+ : sizeof(*(p)) == 2 ? hash64_stable_16((p), (num), (base)) \
+ : hash64_stable_8((p), (num), (base)))
+
+
+/**
+ * hashl - fast 32/64-bit hash of an array for internal use
+ * @p: the array or pointer to first element
+ * @num: the number of elements to hash
+ * @base: the base number to roll into the hash (usually 0)
+ *
+ * This is either hash() or hash64(), on 32/64 bit long machines.
+ */
+#define hashl(p, num, base) \
+ (BUILD_ASSERT_OR_ZERO(sizeof(long) == sizeof(uint32_t) \
+ || sizeof(long) == sizeof(uint64_t)) + \
+ (sizeof(long) == sizeof(uint64_t) \
+ ? hash64((p), (num), (base)) : hash((p), (num), (base))))
+
+/* Our underlying operations. */
+uint32_t hash_any(const void *key, size_t length, uint32_t base);
+uint32_t hash_stable_64(const void *key, size_t n, uint32_t base);
+uint32_t hash_stable_32(const void *key, size_t n, uint32_t base);
+uint32_t hash_stable_16(const void *key, size_t n, uint32_t base);
+uint32_t hash_stable_8(const void *key, size_t n, uint32_t base);
+uint64_t hash64_any(const void *key, size_t length, uint64_t base);
+uint64_t hash64_stable_64(const void *key, size_t n, uint64_t base);
+uint64_t hash64_stable_32(const void *key, size_t n, uint64_t base);
+uint64_t hash64_stable_16(const void *key, size_t n, uint64_t base);
+uint64_t hash64_stable_8(const void *key, size_t n, uint64_t base);
+
+/**
+ * hash_pointer - hash a pointer for internal use
+ * @p: the pointer value to hash
+ * @base: the base number to roll into the hash (usually 0)
+ *
+ * The pointer p (not what p points to!) is combined with the base to form
+ * a 32-bit hash.
+ *
+ * This hash will have different results on different machines, so is
+ * only useful for internal hashes (ie. not hashes sent across the
+ * network or saved to disk).
+ *
+ * Example:
+ * #include <ccan/hash/hash.h>
+ *
+ * // Code to keep track of memory regions.
+ * struct region {
+ * struct region *chain;
+ * void *start;
+ * unsigned int size;
+ * };
+ * // We keep a simple hash table.
+ * static struct region *region_hash[128];
+ *
+ * static void add_region(struct region *r)
+ * {
+ * unsigned int h = hash_pointer(r->start, 0) % 128;
+ *
+ * r->chain = region_hash[h];
+ * region_hash[h] = r;
+ * }
+ *
+ * static struct region *find_region(const void *start)
+ * {
+ * struct region *r;
+ *
+ * for (r = region_hash[hash_pointer(start, 0) % 128]; r; r = r->chain)
+ * if (r->start == start)
+ * return r;
+ * return NULL;
+ * }
+ */
+static inline uint32_t hash_pointer(const void *p, uint32_t base)
+{
+ if (sizeof(p) % sizeof(uint32_t) == 0) {
+ /* This convoluted union is the right way of aliasing. */
+ union {
+ uint32_t a[sizeof(p) / sizeof(uint32_t)];
+ const void *p;
+ } u;
+ u.p = p;
+ return hash_u32(u.a, sizeof(p) / sizeof(uint32_t), base);
+ } else
+ return hash(&p, 1, base);
+}
+#endif /* CCAN_HASH_H */
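A short usage sketch for the macros declared above (hypothetical helper names): hash() for an internal power-of-two bucket index, hash_stable() where the result must be identical across machines:

    #include <ccan/hash/hash.h>
    #include <string.h>

    /* Internal use: mask into a power-of-two table, as the comments advise. */
    static unsigned int bucket_of(const char *name)
    {
            return hash(name, strlen(name), 0) & 255;
    }

    /* External use: same result on big- and little-endian machines. */
    static uint32_t on_disk_hash(const uint16_t *vals, size_t n)
    {
            return hash_stable(vals, n, 0);
    }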
diff --git a/ccan/ccan/htable/LICENSE b/ccan/ccan/htable/LICENSE
new file mode 120000
index 0000000..dc314ec
--- /dev/null
+++ b/ccan/ccan/htable/LICENSE
@@ -0,0 +1 @@
+../../licenses/LGPL-2.1 \ No newline at end of file
diff --git a/ccan/ccan/htable/htable.c b/ccan/ccan/htable/htable.c
new file mode 100644
index 0000000..f631ffe
--- /dev/null
+++ b/ccan/ccan/htable/htable.c
@@ -0,0 +1,491 @@
+/* Licensed under LGPLv2+ - see LICENSE file for details */
+#include <ccan/htable/htable.h>
+#include <ccan/compiler/compiler.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <string.h>
+
+/* We use 0x1 as deleted marker. */
+#define HTABLE_DELETED (0x1)
+
+/* perfect_bitnum 63 means there's no perfect bitnum */
+#define NO_PERFECT_BIT (sizeof(uintptr_t) * CHAR_BIT - 1)
+
+static void *htable_default_alloc(struct htable *ht, size_t len)
+{
+ return calloc(len, 1);
+}
+
+static void htable_default_free(struct htable *ht, void *p)
+{
+ free(p);
+}
+
+static void *(*htable_alloc)(struct htable *, size_t) = htable_default_alloc;
+static void (*htable_free)(struct htable *, void *) = htable_default_free;
+
+void htable_set_allocator(void *(*alloc)(struct htable *, size_t len),
+ void (*free)(struct htable *, void *p))
+{
+ if (!alloc)
+ alloc = htable_default_alloc;
+ if (!free)
+ free = htable_default_free;
+ htable_alloc = alloc;
+ htable_free = free;
+}
+
+/* We clear out the bits which are always the same, and put metadata there. */
+static inline uintptr_t get_extra_ptr_bits(const struct htable *ht,
+ uintptr_t e)
+{
+ return e & ht->common_mask;
+}
+
+static inline void *get_raw_ptr(const struct htable *ht, uintptr_t e)
+{
+ return (void *)((e & ~ht->common_mask) | ht->common_bits);
+}
+
+static inline uintptr_t make_hval(const struct htable *ht,
+ const void *p, uintptr_t bits)
+{
+ return ((uintptr_t)p & ~ht->common_mask) | bits;
+}
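+/* Editor's note (not in the original source), illustrating the trick above:
+ * if, say, every element lives in the same heap region, the high-order
+ * address bits (and the low alignment bits) are identical for all entries.
+ * Those positions carry no information, so make_hval() stores a few hash
+ * bits there instead, get_extra_ptr_bits() retrieves them for a cheap
+ * comparison during lookup, and get_raw_ptr() reconstructs the original
+ * pointer by putting common_bits back. */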
+
+static inline bool entry_is_valid(uintptr_t e)
+{
+ return e > HTABLE_DELETED;
+}
+
+static inline uintptr_t ht_perfect_mask(const struct htable *ht)
+{
+ return (uintptr_t)2 << ht->perfect_bitnum;
+}
+
+static inline uintptr_t get_hash_ptr_bits(const struct htable *ht,
+ size_t hash)
+{
+ /* Shuffling the extra bits (as specified in mask) down the
+ * end is quite expensive. But the lower bits are redundant, so
+ * we fold the value first. */
+ return (hash ^ (hash >> ht->bits))
+ & ht->common_mask & ~ht_perfect_mask(ht);
+}
+
+void htable_init(struct htable *ht,
+ size_t (*rehash)(const void *elem, void *priv), void *priv)
+{
+ struct htable empty = HTABLE_INITIALIZER(empty, NULL, NULL);
+ *ht = empty;
+ ht->rehash = rehash;
+ ht->priv = priv;
+ ht->table = &ht->common_bits;
+}
+
+/* Fill to 87.5% */
+static inline size_t ht_max(const struct htable *ht)
+{
+ return ((size_t)7 << ht->bits) / 8;
+}
+
+/* Clean deleted if we're full, and more than 12.5% deleted */
+static inline size_t ht_max_deleted(const struct htable *ht)
+{
+ return ((size_t)1 << ht->bits) / 8;
+}
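+/* Editor's note: with bits == 7 (128 buckets) these give ht_max == 112 and
+ * ht_max_deleted == 16, so the table is grown (or, when more than 16 slots
+ * are deletion markers, compacted by rehashing in place) as soon as adding
+ * one more element would take elems + deleted over 112. */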
+
+bool htable_init_sized(struct htable *ht,
+ size_t (*rehash)(const void *, void *),
+ void *priv, size_t expect)
+{
+ htable_init(ht, rehash, priv);
+
+ /* Don't go insane with sizing. */
+ for (ht->bits = 1; ht_max(ht) < expect; ht->bits++) {
+ if (ht->bits == 30)
+ break;
+ }
+
+ ht->table = htable_alloc(ht, sizeof(size_t) << ht->bits);
+ if (!ht->table) {
+ ht->table = &ht->common_bits;
+ return false;
+ }
+ (void)htable_debug(ht, HTABLE_LOC);
+ return true;
+}
+
+void htable_clear(struct htable *ht)
+{
+ if (ht->table != &ht->common_bits)
+ htable_free(ht, (void *)ht->table);
+ htable_init(ht, ht->rehash, ht->priv);
+}
+
+bool htable_copy_(struct htable *dst, const struct htable *src)
+{
+ uintptr_t *htable = htable_alloc(dst, sizeof(size_t) << src->bits);
+
+ if (!htable)
+ return false;
+
+ *dst = *src;
+ dst->table = htable;
+ memcpy(dst->table, src->table, sizeof(size_t) << src->bits);
+ return true;
+}
+
+static size_t hash_bucket(const struct htable *ht, size_t h)
+{
+ return h & ((1 << ht->bits)-1);
+}
+
+static void *htable_val(const struct htable *ht,
+ struct htable_iter *i, size_t hash, uintptr_t perfect)
+{
+ uintptr_t h2 = get_hash_ptr_bits(ht, hash) | perfect;
+
+ while (ht->table[i->off]) {
+ if (ht->table[i->off] != HTABLE_DELETED) {
+ if (get_extra_ptr_bits(ht, ht->table[i->off]) == h2)
+ return get_raw_ptr(ht, ht->table[i->off]);
+ }
+ i->off = (i->off + 1) & ((1 << ht->bits)-1);
+ h2 &= ~perfect;
+ }
+ return NULL;
+}
+
+void *htable_firstval_(const struct htable *ht,
+ struct htable_iter *i, size_t hash)
+{
+ i->off = hash_bucket(ht, hash);
+ return htable_val(ht, i, hash, ht_perfect_mask(ht));
+}
+
+void *htable_nextval_(const struct htable *ht,
+ struct htable_iter *i, size_t hash)
+{
+ i->off = (i->off + 1) & ((1 << ht->bits)-1);
+ return htable_val(ht, i, hash, 0);
+}
+
+void *htable_first_(const struct htable *ht, struct htable_iter *i)
+{
+ for (i->off = 0; i->off < (size_t)1 << ht->bits; i->off++) {
+ if (entry_is_valid(ht->table[i->off]))
+ return get_raw_ptr(ht, ht->table[i->off]);
+ }
+ return NULL;
+}
+
+void *htable_next_(const struct htable *ht, struct htable_iter *i)
+{
+ for (i->off++; i->off < (size_t)1 << ht->bits; i->off++) {
+ if (entry_is_valid(ht->table[i->off]))
+ return get_raw_ptr(ht, ht->table[i->off]);
+ }
+ return NULL;
+}
+
+void *htable_prev_(const struct htable *ht, struct htable_iter *i)
+{
+ for (;;) {
+ if (!i->off)
+ return NULL;
+ i->off--;
+ if (entry_is_valid(ht->table[i->off]))
+ return get_raw_ptr(ht, ht->table[i->off]);
+ }
+}
+
+/* Another bit currently in mask needs to be exposed, so that a bucket with p in
+ * it won't appear invalid */
+static COLD void unset_another_common_bit(struct htable *ht,
+ uintptr_t *maskdiff,
+ const void *p)
+{
+ size_t i;
+
+ for (i = sizeof(uintptr_t) * CHAR_BIT - 1; i > 0; i--) {
+ if (((uintptr_t)p & ((uintptr_t)1 << i))
+ && ht->common_mask & ~*maskdiff & ((uintptr_t)1 << i))
+ break;
+ }
+ /* There must have been one, right? */
+ assert(i > 0);
+
+ *maskdiff |= ((uintptr_t)1 << i);
+}
+
+/* We want to change the common mask: this fixes up the table */
+static COLD void fixup_table_common(struct htable *ht, uintptr_t maskdiff)
+{
+ size_t i;
+ uintptr_t bitsdiff;
+
+again:
+ bitsdiff = ht->common_bits & maskdiff;
+
+ for (i = 0; i < (size_t)1 << ht->bits; i++) {
+ uintptr_t e;
+ if (!entry_is_valid(e = ht->table[i]))
+ continue;
+
+ /* Clear the bits no longer in the mask, set them as
+ * expected. */
+ e &= ~maskdiff;
+ e |= bitsdiff;
+ /* If this made it invalid, restart with more exposed */
+ if (!entry_is_valid(e)) {
+ unset_another_common_bit(ht, &maskdiff, get_raw_ptr(ht, e));
+ goto again;
+ }
+ ht->table[i] = e;
+ }
+
+ /* Take away those bits from our mask, bits and perfect bit. */
+ ht->common_mask &= ~maskdiff;
+ ht->common_bits &= ~maskdiff;
+ if (ht_perfect_mask(ht) & maskdiff)
+ ht->perfect_bitnum = NO_PERFECT_BIT;
+}
+
+/* Limited recursion */
+static void ht_add(struct htable *ht, const void *new, size_t h);
+
+/* We tried to add this entry, but it looked invalid! We need to
+ * let another pointer bit through mask */
+static COLD void update_common_fix_invalid(struct htable *ht, const void *p, size_t h)
+{
+ uintptr_t maskdiff;
+
+ assert(ht->elems != 0);
+
+ maskdiff = 0;
+ unset_another_common_bit(ht, &maskdiff, p);
+ fixup_table_common(ht, maskdiff);
+
+ /* Now won't recurse */
+ ht_add(ht, p, h);
+}
+
+/* This does not expand the hash table; that's up to the caller. */
+static void ht_add(struct htable *ht, const void *new, size_t h)
+{
+ size_t i;
+ uintptr_t perfect = ht_perfect_mask(ht);
+
+ i = hash_bucket(ht, h);
+
+ while (entry_is_valid(ht->table[i])) {
+ perfect = 0;
+ i = (i + 1) & ((1 << ht->bits)-1);
+ }
+ ht->table[i] = make_hval(ht, new, get_hash_ptr_bits(ht, h)|perfect);
+ if (!entry_is_valid(ht->table[i]))
+ update_common_fix_invalid(ht, new, h);
+}
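+/* Editor's note: this is open addressing with linear probing.  Only an entry
+ * that lands in its home bucket keeps the "perfect" bit; anything that had to
+ * probe past an occupied slot has it cleared, which is what lets
+ * rehash_table() below leave perfectly-placed entries where they are. */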
+
+static COLD bool double_table(struct htable *ht)
+{
+ unsigned int i;
+ size_t oldnum = (size_t)1 << ht->bits;
+ uintptr_t *oldtable, e;
+
+ oldtable = ht->table;
+ ht->table = htable_alloc(ht, sizeof(size_t) << (ht->bits+1));
+ if (!ht->table) {
+ ht->table = oldtable;
+ return false;
+ }
+ ht->bits++;
+
+ /* If we lost our "perfect bit", get it back now. */
+ if (ht->perfect_bitnum == NO_PERFECT_BIT && ht->common_mask) {
+ for (i = 0; i < sizeof(ht->common_mask) * CHAR_BIT; i++) {
+ if (ht->common_mask & ((size_t)2 << i)) {
+ ht->perfect_bitnum = i;
+ break;
+ }
+ }
+ }
+
+ if (oldtable != &ht->common_bits) {
+ for (i = 0; i < oldnum; i++) {
+ if (entry_is_valid(e = oldtable[i])) {
+ void *p = get_raw_ptr(ht, e);
+ ht_add(ht, p, ht->rehash(p, ht->priv));
+ }
+ }
+ htable_free(ht, oldtable);
+ }
+ ht->deleted = 0;
+
+ (void)htable_debug(ht, HTABLE_LOC);
+ return true;
+}
+
+static COLD void rehash_table(struct htable *ht)
+{
+ size_t start, i;
+ uintptr_t e, perfect = ht_perfect_mask(ht);
+
+ /* Beware wrap cases: we need to start from first empty bucket. */
+ for (start = 0; ht->table[start]; start++);
+
+ for (i = 0; i < (size_t)1 << ht->bits; i++) {
+ size_t h = (i + start) & ((1 << ht->bits)-1);
+ e = ht->table[h];
+ if (!e)
+ continue;
+ if (e == HTABLE_DELETED)
+ ht->table[h] = 0;
+ else if (!(e & perfect)) {
+ void *p = get_raw_ptr(ht, e);
+ ht->table[h] = 0;
+ ht_add(ht, p, ht->rehash(p, ht->priv));
+ }
+ }
+ ht->deleted = 0;
+ (void)htable_debug(ht, HTABLE_LOC);
+}
+
+/* We stole some bits, now we need to put them back... */
+static COLD void update_common(struct htable *ht, const void *p)
+{
+ uintptr_t maskdiff;
+
+ if (ht->elems == 0) {
+ ht->common_mask = -1;
+ ht->common_bits = ((uintptr_t)p & ht->common_mask);
+ ht->perfect_bitnum = 0;
+ (void)htable_debug(ht, HTABLE_LOC);
+ return;
+ }
+
+ /* Find bits which are unequal to old common set. */
+ maskdiff = ht->common_bits ^ ((uintptr_t)p & ht->common_mask);
+
+ fixup_table_common(ht, maskdiff);
+ (void)htable_debug(ht, HTABLE_LOC);
+}
+
+bool htable_add_(struct htable *ht, size_t hash, const void *p)
+{
+ /* Cannot insert NULL, or (void *)1. */
+ assert(p);
+ assert(entry_is_valid((uintptr_t)p));
+
+ /* Getting too full? */
+ if (ht->elems+1 + ht->deleted > ht_max(ht)) {
+ /* If we're more than 1/8 deleted, clean those,
+ * otherwise double table size. */
+ if (ht->deleted > ht_max_deleted(ht))
+ rehash_table(ht);
+ else if (!double_table(ht))
+ return false;
+ }
+ if (((uintptr_t)p & ht->common_mask) != ht->common_bits)
+ update_common(ht, p);
+
+ ht_add(ht, p, hash);
+ ht->elems++;
+ return true;
+}
+
+bool htable_del_(struct htable *ht, size_t h, const void *p)
+{
+ struct htable_iter i;
+ void *c;
+
+ for (c = htable_firstval(ht,&i,h); c; c = htable_nextval(ht,&i,h)) {
+ if (c == p) {
+ htable_delval(ht, &i);
+ return true;
+ }
+ }
+ return false;
+}
+
+void htable_delval_(struct htable *ht, struct htable_iter *i)
+{
+ assert(i->off < (size_t)1 << ht->bits);
+ assert(entry_is_valid(ht->table[i->off]));
+
+ ht->elems--;
+ /* Cheap test: if the next bucket is empty, don't need delete marker */
+ if (ht->table[hash_bucket(ht, i->off+1)] != 0) {
+ ht->table[i->off] = HTABLE_DELETED;
+ ht->deleted++;
+ } else
+ ht->table[i->off] = 0;
+}
+
+void *htable_pick_(const struct htable *ht, size_t seed, struct htable_iter *i)
+{
+ void *e;
+ struct htable_iter unwanted;
+
+ if (!i)
+ i = &unwanted;
+ i->off = seed % ((size_t)1 << ht->bits);
+ e = htable_next(ht, i);
+ if (!e)
+ e = htable_first(ht, i);
+ return e;
+}
+
+struct htable *htable_check(const struct htable *ht, const char *abortstr)
+{
+ void *p;
+ struct htable_iter i;
+ size_t n = 0;
+
+ /* Use non-DEBUG versions here, to avoid infinite recursion with
+ * CCAN_HTABLE_DEBUG! */
+ for (p = htable_first_(ht, &i); p; p = htable_next_(ht, &i)) {
+ struct htable_iter i2;
+ void *c;
+ size_t h = ht->rehash(p, ht->priv);
+ bool found = false;
+
+ n++;
+
+ /* Open-code htable_get to avoid CCAN_HTABLE_DEBUG */
+ for (c = htable_firstval_(ht, &i2, h);
+ c;
+ c = htable_nextval_(ht, &i2, h)) {
+ if (c == p) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ if (abortstr) {
+ fprintf(stderr,
+ "%s: element %p in position %zu"
+ " cannot find itself\n",
+ abortstr, p, i.off);
+ abort();
+ }
+ return NULL;
+ }
+ }
+ if (n != ht->elems) {
+ if (abortstr) {
+ fprintf(stderr,
+ "%s: found %zu elems, expected %zu\n",
+ abortstr, n, ht->elems);
+ abort();
+ }
+ return NULL;
+ }
+
+ return (struct htable *)ht;
+}
diff --git a/ccan/ccan/htable/htable.h b/ccan/ccan/htable/htable.h
new file mode 100644
index 0000000..faaf541
--- /dev/null
+++ b/ccan/ccan/htable/htable.h
@@ -0,0 +1,290 @@
+/* Licensed under LGPLv2+ - see LICENSE file for details */
+#ifndef CCAN_HTABLE_H
+#define CCAN_HTABLE_H
+#include "config.h"
+#include <ccan/str/str.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+/* Define CCAN_HTABLE_DEBUG for expensive debugging checks on each call. */
+#define HTABLE_LOC __FILE__ ":" stringify(__LINE__)
+#ifdef CCAN_HTABLE_DEBUG
+#define htable_debug(h, loc) htable_check((h), loc)
+#else
+#define htable_debug(h, loc) ((void)loc, h)
+#endif
+
+/**
+ * struct htable - private definition of a htable.
+ *
+ * It's exposed here so you can put it in your structures and so we can
+ * supply inline functions.
+ */
+struct htable {
+ size_t (*rehash)(const void *elem, void *priv);
+ void *priv;
+ unsigned int bits, perfect_bitnum;
+ size_t elems, deleted;
+ /* These are the bits which are the same in all pointers. */
+ uintptr_t common_mask, common_bits;
+ uintptr_t *table;
+};
+
+/**
+ * HTABLE_INITIALIZER - static initialization for a hash table.
+ * @name: name of this htable.
+ * @rehash: hash function to use for rehashing.
+ * @priv: private argument to @rehash function.
+ *
+ * This is useful for setting up static and global hash tables.
+ *
+ * Example:
+ * // For simplicity's sake, say hash value is contents of elem.
+ * static size_t rehash(const void *elem, void *unused)
+ * {
+ * (void)unused;
+ * return *(size_t *)elem;
+ * }
+ * static struct htable ht = HTABLE_INITIALIZER(ht, rehash, NULL);
+ */
+#define HTABLE_INITIALIZER(name, rehash, priv) \
+ { rehash, priv, 0, 0, 0, 0, -1, 0, &name.common_bits }
+
+/**
+ * htable_init - initialize an empty hash table.
+ * @ht: the hash table to initialize
+ * @rehash: hash function to use for rehashing.
+ * @priv: private argument to @rehash function.
+ */
+void htable_init(struct htable *ht,
+ size_t (*rehash)(const void *elem, void *priv), void *priv);
+
+/**
+ * htable_init_sized - initialize an empty hash table of given size.
+ * @ht: the hash table to initialize
+ * @rehash: hash function to use for rehashing.
+ * @priv: private argument to @rehash function.
+ * @size: the expected number of elements.
+ *
+ * If this returns false, @ht is still usable, but may need to do reallocation
+ * upon an add. If this returns true, it will not need to reallocate within
+ * @size htable_adds.
+ */
+bool htable_init_sized(struct htable *ht,
+ size_t (*rehash)(const void *elem, void *priv),
+ void *priv, size_t size);
+
+/**
+ * htable_count - count number of entries in a hash table.
+ * @ht: the hash table
+ */
+static inline size_t htable_count(const struct htable *ht)
+{
+ return ht->elems;
+}
+
+/**
+ * htable_clear - empty a hash table.
+ * @ht: the hash table to clear
+ *
+ * This doesn't do anything to any pointers left in it.
+ */
+void htable_clear(struct htable *ht);
+
+
+/**
+ * htable_check - check hash table for consistency
+ * @ht: the htable
+ * @abortstr: the location to print on aborting, or NULL.
+ *
+ * Because hash tables contain redundant information, it is possible to check
+ * that each element is stored in the correct location.  This is useful as a
+ * debugging check. If @abortstr is non-NULL, that will be printed in a
+ * diagnostic if the htable is inconsistent, and the function will abort.
+ *
+ * Returns the htable if it is consistent, NULL if not (it can never return
+ * NULL if @abortstr is set).
+ */
+struct htable *htable_check(const struct htable *ht, const char *abortstr);
+
+/**
+ * htable_copy - duplicate a hash table.
+ * @dst: the hash table to overwrite
+ * @src: the hash table to copy
+ *
+ * Only fails on out-of-memory.
+ *
+ * Equivalent to (but faster than):
+ * if (!htable_init_sized(dst, src->rehash, src->priv, 1U << src->bits))
+ * return false;
+ * v = htable_first(src, &i);
+ * while (v) {
+ *		htable_add(dst, src->rehash(v, src->priv), v);
+ *		v = htable_next(src, &i);
+ * }
+ * return true;
+ */
+#define htable_copy(dst, src) htable_copy_(dst, htable_debug(src, HTABLE_LOC))
+bool htable_copy_(struct htable *dst, const struct htable *src);
+
+/**
+ * htable_add - add a pointer into a hash table.
+ * @ht: the htable
+ * @hash: the hash value of the object
+ * @p: the non-NULL pointer (also cannot be (void *)1).
+ *
+ * Note that this can only fail due to allocation failure.  Otherwise, it
+ * returns true.
+ */
+#define htable_add(ht, hash, p) \
+ htable_add_(htable_debug(ht, HTABLE_LOC), hash, p)
+bool htable_add_(struct htable *ht, size_t hash, const void *p);
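+/* Editor's sketch (not part of the original header) of basic usage; "obj" is
+ * a hypothetical element pointer, and its address doubles as a (valid, if
+ * poorly distributed) hash value -- real code would normally use
+ * hash_pointer() from ccan/hash instead:
+ *
+ *	static size_t hash_by_addr(const void *elem, void *unused)
+ *	{
+ *		(void)unused;
+ *		return (size_t)elem;
+ *	}
+ *	static bool ptr_eq(const void *candidate, void *ptr)
+ *	{
+ *		return candidate == ptr;
+ *	}
+ *
+ *	struct htable ht = HTABLE_INITIALIZER(ht, hash_by_addr, NULL);
+ *	if (!htable_add(&ht, (size_t)obj, obj))
+ *		abort();  // only fails on allocation failure
+ *	assert(htable_get(&ht, (size_t)obj, ptr_eq, obj) == obj);
+ *	htable_del(&ht, (size_t)obj, obj);
+ */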
+
+/**
+ * htable_del - remove a pointer from a hash table
+ * @ht: the htable
+ * @hash: the hash value of the object
+ * @p: the pointer
+ *
+ * Returns true if the pointer was found (and deleted).
+ */
+#define htable_del(ht, hash, p) \
+ htable_del_(htable_debug(ht, HTABLE_LOC), hash, p)
+bool htable_del_(struct htable *ht, size_t hash, const void *p);
+
+/**
+ * struct htable_iter - iterator for htable_first or htable_firstval etc.
+ *
+ * This refers to a location inside the hashtable.
+ */
+struct htable_iter {
+ size_t off;
+};
+
+/**
+ * htable_firstval - find a candidate for a given hash value
+ * @htable: the hashtable
+ * @i: the struct htable_iter to initialize
+ * @hash: the hash value
+ *
+ * You'll need to check the value is what you want; returns NULL if none.
+ * See Also:
+ * htable_delval()
+ */
+#define htable_firstval(htable, i, hash) \
+ htable_firstval_(htable_debug(htable, HTABLE_LOC), i, hash)
+
+void *htable_firstval_(const struct htable *htable,
+ struct htable_iter *i, size_t hash);
+
+/**
+ * htable_nextval - find another candidate for a given hash value
+ * @htable: the hashtable
+ * @i: the struct htable_iter to initialize
+ * @hash: the hash value
+ *
+ * You'll need to check the value is what you want; returns NULL if no more.
+ */
+#define htable_nextval(htable, i, hash) \
+ htable_nextval_(htable_debug(htable, HTABLE_LOC), i, hash)
+void *htable_nextval_(const struct htable *htable,
+ struct htable_iter *i, size_t hash);
+
+/**
+ * htable_get - find an entry in the hash table
+ * @ht: the hashtable
+ * @h: the hash value of the entry
+ * @cmp: the comparison function
+ * @ptr: the pointer to hand to the comparison function.
+ *
+ * Convenient inline wrapper for htable_firstval/htable_nextval loop.
+ */
+static inline void *htable_get(const struct htable *ht,
+ size_t h,
+ bool (*cmp)(const void *candidate, void *ptr),
+ const void *ptr)
+{
+ struct htable_iter i;
+ void *c;
+
+ for (c = htable_firstval(ht,&i,h); c; c = htable_nextval(ht,&i,h)) {
+ if (cmp(c, (void *)ptr))
+ return c;
+ }
+ return NULL;
+}
+
+/**
+ * htable_first - find an entry in the hash table
+ * @ht: the hashtable
+ * @i: the struct htable_iter to initialize
+ *
+ * Get an entry in the hashtable; NULL if empty.
+ */
+#define htable_first(htable, i) \
+ htable_first_(htable_debug(htable, HTABLE_LOC), i)
+void *htable_first_(const struct htable *htable, struct htable_iter *i);
+
+/**
+ * htable_next - find another entry in the hash table
+ * @ht: the hashtable
+ * @i: the struct htable_iter to use
+ *
+ * Get another entry in the hashtable; NULL if all done.
+ * This is usually used after htable_first or prior non-NULL htable_next.
+ */
+#define htable_next(htable, i) \
+ htable_next_(htable_debug(htable, HTABLE_LOC), i)
+void *htable_next_(const struct htable *htable, struct htable_iter *i);
+
+/**
+ * htable_prev - find the previous entry in the hash table
+ * @ht: the hashtable
+ * @i: the struct htable_iter to use
+ *
+ * Get previous entry in the hashtable; NULL if all done.
+ *
+ * "previous" here only means the item that would have been returned by
+ * htable_next() before the item it returned most recently.
+ *
+ * This is usually used in the middle of (or after) an htable_next iteration,
+ * to "unwind" actions taken.
+ */
+#define htable_prev(htable, i) \
+ htable_prev_(htable_debug(htable, HTABLE_LOC), i)
+void *htable_prev_(const struct htable *htable, struct htable_iter *i);
+
+/**
+ * htable_delval - remove an iterated pointer from a hash table
+ * @ht: the htable
+ * @i: the htable_iter
+ *
+ * Usually used to delete a hash entry after it has been found with
+ * htable_firstval etc.
+ */
+#define htable_delval(htable, i) \
+ htable_delval_(htable_debug(htable, HTABLE_LOC), i)
+void htable_delval_(struct htable *ht, struct htable_iter *i);
+
+/**
+ * htable_pick - set iterator to a random valid entry.
+ * @ht: the htable
+ * @seed: a random number to use.
+ * @i: the htable_iter which is output (or NULL).
+ *
+ * Usually used with htable_delval to delete a random entry. Returns
+ * NULL iff the table is empty, otherwise a random entry.
+ */
+#define htable_pick(htable, seed, i) \
+ htable_pick_(htable_debug(htable, HTABLE_LOC), seed, i)
+void *htable_pick_(const struct htable *ht, size_t seed, struct htable_iter *i);
+
+/**
+ * htable_set_allocator - set calloc/free functions.
+ * @alloc: allocator to use, must zero memory!
+ * @free: unallocator to use (@p is NULL or a return from @alloc)
+ */
+void htable_set_allocator(void *(*alloc)(struct htable *, size_t len),
+ void (*free)(struct htable *, void *p));
+#endif /* CCAN_HTABLE_H */
diff --git a/ccan/ccan/htable/htable_type.h b/ccan/ccan/htable/htable_type.h
new file mode 100644
index 0000000..0aacb7f
--- /dev/null
+++ b/ccan/ccan/htable/htable_type.h
@@ -0,0 +1,188 @@
+/* Licensed under LGPLv2+ - see LICENSE file for details */
+#ifndef CCAN_HTABLE_TYPE_H
+#define CCAN_HTABLE_TYPE_H
+#include <ccan/htable/htable.h>
+#include <ccan/compiler/compiler.h>
+#include "config.h"
+
+/**
+ * HTABLE_DEFINE_TYPE - create a set of htable ops for a type
+ * @type: a type whose pointers will be values in the hash.
+ * @keyof: a function/macro to extract a key: <keytype> @keyof(const type *elem)
+ * @hashfn: a hash function for a @key: size_t @hashfn(const <keytype> *)
+ * @eqfn: an equality function for keys: bool @eqfn(const type *, const <keytype> *)
+ * @name: a prefix for all the functions to define (of form <name>_*)
+ *
+ * NULL values may not be placed into the hash table.
+ *
+ * This defines the hashtable type and an iterator type:
+ * struct <name>;
+ * struct <name>_iter;
+ *
+ * It also defines initialization and freeing functions:
+ * void <name>_init(struct <name> *);
+ * bool <name>_init_sized(struct <name> *, size_t);
+ * void <name>_clear(struct <name> *);
+ * bool <name>_copy(struct <name> *dst, const struct <name> *src);
+ *
+ * Count entries:
+ * size_t <name>_count(const struct <name> *ht);
+ *
+ * Add function only fails if we run out of memory:
+ * bool <name>_add(struct <name> *ht, const <type> *e);
+ *
+ * Delete and delete-by key return true if it was in the set:
+ * bool <name>_del(struct <name> *ht, const <type> *e);
+ * bool <name>_delkey(struct <name> *ht, const <keytype> *k);
+ *
+ * Delete by iterator:
+ * bool <name>_delval(struct <name> *ht, struct <name>_iter *i);
+ *
+ * Find and return the (first) matching element, or NULL:
+ * type *<name>_get(const struct @name *ht, const <keytype> *k);
+ *
+ * Find and return all matching elements, or NULL:
+ * type *<name>_getfirst(const struct @name *ht, const <keytype> *k,
+ * struct <name>_iter *i);
+ * type *<name>_getnext(const struct @name *ht, const <keytype> *k,
+ * struct <name>_iter *i);
+ *
+ * Iteration over hashtable is also supported:
+ * type *<name>_first(const struct <name> *ht, struct <name>_iter *i);
+ * type *<name>_next(const struct <name> *ht, struct <name>_iter *i);
+ * type *<name>_prev(const struct <name> *ht, struct <name>_iter *i);
+ * type *<name>_pick(const struct <name> *ht, size_t seed,
+ * struct <name>_iter *i);
+ * It's currently safe to iterate over a changing hashtable, but you might
+ * miss an element. Iteration isn't very efficient, either.
+ *
+ * You can use HTABLE_INITIALIZER like so:
+ * struct <name> ht = { HTABLE_INITIALIZER(ht.raw, <name>_hash, NULL) };
+ */
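+/* Editor's sketch (not from the original header): defining a string-keyed
+ * table for a hypothetical "struct obj"; hash() and streq() come from
+ * ccan/hash and ccan/str.
+ *
+ *	struct obj { const char *name; int val; };
+ *	static const char *obj_key(const struct obj *o) { return o->name; }
+ *	static size_t hash_key(const char *k) { return hash(k, strlen(k), 0); }
+ *	static bool obj_eq_key(const struct obj *o, const char *k)
+ *	{
+ *		return streq(o->name, k);
+ *	}
+ *	HTABLE_DEFINE_TYPE(struct obj, obj_key, hash_key, obj_eq_key, objtable);
+ *
+ *	// Generated calls then look like:
+ *	//	struct objtable tab;
+ *	//	objtable_init(&tab);
+ *	//	objtable_add(&tab, o);
+ *	//	struct obj *found = objtable_get(&tab, "some name");
+ *	//	objtable_delkey(&tab, "some name");
+ */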
+#define HTABLE_DEFINE_TYPE(type, keyof, hashfn, eqfn, name) \
+ struct name { struct htable raw; }; \
+ struct name##_iter { struct htable_iter i; }; \
+ static inline size_t name##_hash(const void *elem, void *priv) \
+ { \
+ (void)priv; \
+ return hashfn(keyof((const type *)elem)); \
+ } \
+ static inline UNNEEDED void name##_init(struct name *ht) \
+ { \
+ htable_init(&ht->raw, name##_hash, NULL); \
+ } \
+ static inline UNNEEDED bool name##_init_sized(struct name *ht, \
+ size_t s) \
+ { \
+ return htable_init_sized(&ht->raw, name##_hash, NULL, s); \
+ } \
+ static inline UNNEEDED size_t name##_count(const struct name *ht) \
+ { \
+ return htable_count(&ht->raw); \
+ } \
+ static inline UNNEEDED void name##_clear(struct name *ht) \
+ { \
+ htable_clear(&ht->raw); \
+ } \
+ static inline UNNEEDED bool name##_copy(struct name *dst, \
+ const struct name *src) \
+ { \
+ return htable_copy(&dst->raw, &src->raw); \
+ } \
+ static inline bool name##_add(struct name *ht, const type *elem) \
+ { \
+ return htable_add(&ht->raw, hashfn(keyof(elem)), elem); \
+ } \
+ static inline UNNEEDED bool name##_del(struct name *ht, \
+ const type *elem) \
+ { \
+ return htable_del(&ht->raw, hashfn(keyof(elem)), elem); \
+ } \
+ static inline UNNEEDED type *name##_get(const struct name *ht, \
+ const HTABLE_KTYPE(keyof, type) k) \
+ { \
+ struct htable_iter i; \
+ size_t h = hashfn(k); \
+ void *c; \
+ \
+ for (c = htable_firstval(&ht->raw,&i,h); \
+ c; \
+ c = htable_nextval(&ht->raw,&i,h)) { \
+ if (eqfn(c, k)) \
+ return c; \
+ } \
+ return NULL; \
+ } \
+ static inline UNNEEDED type *name##_getmatch_(const struct name *ht, \
+ const HTABLE_KTYPE(keyof, type) k, \
+ size_t h, \
+ type *v, \
+ struct name##_iter *iter) \
+ { \
+ while (v) { \
+ if (eqfn(v, k)) \
+ break; \
+ v = htable_nextval(&ht->raw, &iter->i, h); \
+ } \
+ return v; \
+ } \
+ static inline UNNEEDED type *name##_getfirst(const struct name *ht, \
+ const HTABLE_KTYPE(keyof, type) k, \
+ struct name##_iter *iter) \
+ { \
+ size_t h = hashfn(k); \
+ type *v = htable_firstval(&ht->raw, &iter->i, h); \
+ return name##_getmatch_(ht, k, h, v, iter); \
+ } \
+ static inline UNNEEDED type *name##_getnext(const struct name *ht, \
+ const HTABLE_KTYPE(keyof, type) k, \
+ struct name##_iter *iter) \
+ { \
+ size_t h = hashfn(k); \
+ type *v = htable_nextval(&ht->raw, &iter->i, h); \
+ return name##_getmatch_(ht, k, h, v, iter); \
+ } \
+ static inline UNNEEDED bool name##_delkey(struct name *ht, \
+ const HTABLE_KTYPE(keyof, type) k) \
+ { \
+ type *elem = name##_get(ht, k); \
+ if (elem) \
+ return name##_del(ht, elem); \
+ return false; \
+ } \
+ static inline UNNEEDED void name##_delval(struct name *ht, \
+ struct name##_iter *iter) \
+ { \
+ htable_delval(&ht->raw, &iter->i); \
+ } \
+ static inline UNNEEDED type *name##_pick(const struct name *ht, \
+ size_t seed, \
+ struct name##_iter *iter) \
+ { \
+ return htable_pick(&ht->raw, seed, iter ? &iter->i : NULL); \
+ } \
+ static inline UNNEEDED type *name##_first(const struct name *ht, \
+ struct name##_iter *iter) \
+ { \
+ return htable_first(&ht->raw, &iter->i); \
+ } \
+ static inline UNNEEDED type *name##_next(const struct name *ht, \
+ struct name##_iter *iter) \
+ { \
+ return htable_next(&ht->raw, &iter->i); \
+ } \
+ static inline UNNEEDED type *name##_prev(const struct name *ht, \
+ struct name##_iter *iter) \
+ { \
+ return htable_prev(&ht->raw, &iter->i); \
+ }
+
+#if HAVE_TYPEOF
+#define HTABLE_KTYPE(keyof, type) typeof(keyof((const type *)NULL))
+#else
+/* Assumes keys are a pointer: if not, override. */
+#ifndef HTABLE_KTYPE
+#define HTABLE_KTYPE(keyof, type) void *
+#endif
+#endif
+#endif /* CCAN_HTABLE_TYPE_H */
diff --git a/ccan/ccan/ilog/LICENSE b/ccan/ccan/ilog/LICENSE
new file mode 120000
index 0000000..b7951da
--- /dev/null
+++ b/ccan/ccan/ilog/LICENSE
@@ -0,0 +1 @@
+../../licenses/CC0 \ No newline at end of file
diff --git a/ccan/ccan/ilog/ilog.c b/ccan/ccan/ilog/ilog.c
new file mode 100644
index 0000000..5f5122d
--- /dev/null
+++ b/ccan/ccan/ilog/ilog.c
@@ -0,0 +1,141 @@
+/*(C) Timothy B. Terriberry (tterribe@xiph.org) 2001-2009 CC0 (Public domain).
+ * See LICENSE file for details. */
+#include "ilog.h"
+#include <limits.h>
+
+/*The fastest fallback strategy for platforms with fast multiplication appears
+ to be based on de Bruijn sequences~\cite{LP98}.
+ Tests confirmed this to be true even on an ARM11, where it is actually faster
+ than using the native clz instruction.
+ Define ILOG_NODEBRUIJN to use a simpler fallback on platforms where
+ multiplication or table lookups are too expensive.
+
+ @UNPUBLISHED{LP98,
+ author="Charles E. Leiserson and Harald Prokop",
+ title="Using de {Bruijn} Sequences to Index a 1 in a Computer Word",
+ month=Jun,
+ year=1998,
+ note="\url{http://supertech.csail.mit.edu/papers/debruijn.pdf}"
+ }*/
+static UNNEEDED const unsigned char DEBRUIJN_IDX32[32]={
+ 0, 1,28, 2,29,14,24, 3,30,22,20,15,25,17, 4, 8,
+ 31,27,13,23,21,19,16, 7,26,12,18, 6,11, 5,10, 9
+};
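+/* Editor's note: 0x77CB531 is a de Bruijn sequence B(2,5).  Once ilog32()
+ * below has reduced _v to its highest set bit (a single bit 1<<k), the
+ * multiplication shifts the sequence left by k, so the top five bits of the
+ * product are unique for each k; DEBRUIJN_IDX32[] maps that window back to
+ * the bit position. */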
+
+/* We always compile these in, in case someone takes address of function. */
+#undef ilog32_nz
+#undef ilog32
+#undef ilog64_nz
+#undef ilog64
+
+int ilog32(uint32_t _v){
+/*On a Pentium M, this branchless version tested as the fastest version without
+ multiplications on 1,000,000,000 random 32-bit integers, edging out a
+ similar version with branches, and a 256-entry LUT version.*/
+# if defined(ILOG_NODEBRUIJN)
+ int ret;
+ int m;
+ ret=_v>0;
+ m=(_v>0xFFFFU)<<4;
+ _v>>=m;
+ ret|=m;
+ m=(_v>0xFFU)<<3;
+ _v>>=m;
+ ret|=m;
+ m=(_v>0xFU)<<2;
+ _v>>=m;
+ ret|=m;
+ m=(_v>3)<<1;
+ _v>>=m;
+ ret|=m;
+ ret+=_v>1;
+ return ret;
+/*This de Bruijn sequence version is faster if you have a fast multiplier.*/
+# else
+ int ret;
+ ret=_v>0;
+ _v|=_v>>1;
+ _v|=_v>>2;
+ _v|=_v>>4;
+ _v|=_v>>8;
+ _v|=_v>>16;
+ _v=(_v>>1)+1;
+ ret+=DEBRUIJN_IDX32[_v*0x77CB531U>>27&0x1F];
+ return ret;
+# endif
+}
+
+int ilog32_nz(uint32_t _v)
+{
+ return ilog32(_v);
+}
+
+int ilog64(uint64_t _v){
+# if defined(ILOG_NODEBRUIJN)
+ uint32_t v;
+ int ret;
+ int m;
+ ret=_v>0;
+ m=(_v>0xFFFFFFFFU)<<5;
+ v=(uint32_t)(_v>>m);
+ ret|=m;
+ m=(v>0xFFFFU)<<4;
+ v>>=m;
+ ret|=m;
+ m=(v>0xFFU)<<3;
+ v>>=m;
+ ret|=m;
+ m=(v>0xFU)<<2;
+ v>>=m;
+ ret|=m;
+ m=(v>3)<<1;
+ v>>=m;
+ ret|=m;
+ ret+=v>1;
+ return ret;
+# else
+/*If we don't have a 64-bit word, split it into two 32-bit halves.*/
+# if LONG_MAX<9223372036854775807LL
+ uint32_t v;
+ int ret;
+ int m;
+ ret=_v>0;
+ m=(_v>0xFFFFFFFFU)<<5;
+ v=(uint32_t)(_v>>m);
+ ret|=m;
+ v|=v>>1;
+ v|=v>>2;
+ v|=v>>4;
+ v|=v>>8;
+ v|=v>>16;
+ v=(v>>1)+1;
+ ret+=DEBRUIJN_IDX32[v*0x77CB531U>>27&0x1F];
+ return ret;
+/*Otherwise do it in one 64-bit operation.*/
+# else
+ static const unsigned char DEBRUIJN_IDX64[64]={
+ 0, 1, 2, 7, 3,13, 8,19, 4,25,14,28, 9,34,20,40,
+ 5,17,26,38,15,46,29,48,10,31,35,54,21,50,41,57,
+ 63, 6,12,18,24,27,33,39,16,37,45,47,30,53,49,56,
+ 62,11,23,32,36,44,52,55,61,22,43,51,60,42,59,58
+ };
+ int ret;
+ ret=_v>0;
+ _v|=_v>>1;
+ _v|=_v>>2;
+ _v|=_v>>4;
+ _v|=_v>>8;
+ _v|=_v>>16;
+ _v|=_v>>32;
+ _v=(_v>>1)+1;
+ ret+=DEBRUIJN_IDX64[_v*0x218A392CD3D5DBF>>58&0x3F];
+ return ret;
+# endif
+# endif
+}
+
+int ilog64_nz(uint64_t _v)
+{
+ return ilog64(_v);
+}
+
diff --git a/ccan/ccan/ilog/ilog.h b/ccan/ccan/ilog/ilog.h
new file mode 100644
index 0000000..32702b1
--- /dev/null
+++ b/ccan/ccan/ilog/ilog.h
@@ -0,0 +1,154 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#if !defined(_ilog_H)
+# define _ilog_H (1)
+# include "config.h"
+# include <stdint.h>
+# include <limits.h>
+# include <ccan/compiler/compiler.h>
+
+/**
+ * ilog32 - Integer binary logarithm of a 32-bit value.
+ * @_v: A 32-bit value.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ *
+ * See Also:
+ * ilog32_nz(), ilog64()
+ *
+ * Example:
+ * // Rounds up to next power of 2 (if not a power of 2).
+ * static uint32_t round_up32(uint32_t i)
+ * {
+ * assert(i != 0);
+ * return 1U << ilog32(i-1);
+ * }
+ */
+int ilog32(uint32_t _v) CONST_FUNCTION;
+
+/**
+ * ilog32_nz - Integer binary logarithm of a non-zero 32-bit value.
+ * @_v: A 32-bit value.
+ * Returns floor(log2(_v))+1, or undefined if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ * See Also:
+ * ilog32(), ilog64_nz()
+ * Example:
+ * // Find Last Set (ie. highest bit set, 0 to 31).
+ * static uint32_t fls32(uint32_t i)
+ * {
+ * assert(i != 0);
+ * return ilog32_nz(i) - 1;
+ * }
+ */
+int ilog32_nz(uint32_t _v) CONST_FUNCTION;
+
+/**
+ * ilog64 - Integer binary logarithm of a 64-bit value.
+ * @_v: A 64-bit value.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ * See Also:
+ * ilog64_nz(), ilog32()
+ */
+int ilog64(uint64_t _v) CONST_FUNCTION;
+
+/**
+ * ilog64_nz - Integer binary logarithm of a non-zero 64-bit value.
+ * @_v: A 64-bit value.
+ * Returns floor(log2(_v))+1, or undefined if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * Note that many uses will resolve to the fast macro version instead.
+ * See Also:
+ * ilog64(), ilog32_nz()
+ */
+int ilog64_nz(uint64_t _v) CONST_FUNCTION;
+
+/**
+ * STATIC_ILOG_32 - The integer logarithm of an (unsigned, 32-bit) constant.
+ * @_v: A non-negative 32-bit constant.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * This macro should only be used when you need a compile-time constant,
+ * otherwise ilog32 or ilog32_nz are just as fast and more flexible.
+ *
+ * Example:
+ * #define MY_PAGE_SIZE 4096
+ *	#define MY_PAGE_BITS (STATIC_ILOG_32(MY_PAGE_SIZE) - 1)
+ */
+#define STATIC_ILOG_32(_v) (STATIC_ILOG5((uint32_t)(_v)))
+
+/**
+ * STATIC_ILOG_64 - The integer logarithm of an (unsigned, 64-bit) constant.
+ * @_v: A non-negative 64-bit constant.
+ * Returns floor(log2(_v))+1, or 0 if _v==0.
+ * This is the number of bits that would be required to represent _v in two's
+ * complement notation with all of the leading zeros stripped.
+ * This macro should only be used when you need a compile-time constant,
+ * otherwise ilog64 or ilog64_nz are just as fast and more flexible.
+ */
+#define STATIC_ILOG_64(_v) (STATIC_ILOG6((uint64_t)(_v)))
+
+/* Private implementation details */
+
+/*Note the casts to (int) below: this prevents "upgrading"
+ the type of an entire expression to an (unsigned) size_t.*/
+#if INT_MAX>=2147483647 && HAVE_BUILTIN_CLZ
+#define builtin_ilog32_nz(v) \
+ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clz(v))
+#elif LONG_MAX>=2147483647L && HAVE_BUILTIN_CLZL
+#define builtin_ilog32_nz(v) \
+ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clzl(v))
+#endif
+
+#if INT_MAX>=9223372036854775807LL && HAVE_BUILTIN_CLZ
+#define builtin_ilog64_nz(v) \
+ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clz(v))
+#elif LONG_MAX>=9223372036854775807LL && HAVE_BUILTIN_CLZL
+#define builtin_ilog64_nz(v) \
+ (((int)sizeof(unsigned long)*CHAR_BIT) - __builtin_clzl(v))
+#elif HAVE_BUILTIN_CLZLL
+#define builtin_ilog64_nz(v) \
+ (((int)sizeof(unsigned long long)*CHAR_BIT) - __builtin_clzll(v))
+#endif
+
+#ifdef builtin_ilog32_nz
+/* This used to be builtin_ilog32_nz(_v)&-!!(_v), which means it zeroes out
+ * the undefined builtin_ilog32_nz(0) return. But clang UndefinedBehaviorSanitizer
+ * complains, so do the branch: */
+#define ilog32(_v) ((_v) ? builtin_ilog32_nz(_v) : 0)
+#define ilog32_nz(_v) builtin_ilog32_nz(_v)
+#else
+#define ilog32_nz(_v) ilog32(_v)
+#define ilog32(_v) (IS_COMPILE_CONSTANT(_v) ? STATIC_ILOG_32(_v) : ilog32(_v))
+#endif /* builtin_ilog32_nz */
+
+#ifdef builtin_ilog64_nz
+#define ilog64(_v) ((_v) ? builtin_ilog64_nz(_v) : 0)
+#define ilog64_nz(_v) builtin_ilog64_nz(_v)
+#else
+#define ilog64_nz(_v) ilog64(_v)
+#define ilog64(_v) (IS_COMPILE_CONSTANT(_v) ? STATIC_ILOG_64(_v) : ilog64(_v))
+#endif /* builtin_ilog64_nz */
+
+/* Macros for evaluating compile-time constant ilog. */
+# define STATIC_ILOG0(_v) (!!(_v))
+# define STATIC_ILOG1(_v) (((_v)&0x2)?2:STATIC_ILOG0(_v))
+# define STATIC_ILOG2(_v) (((_v)&0xC)?2+STATIC_ILOG1((_v)>>2):STATIC_ILOG1(_v))
+# define STATIC_ILOG3(_v) \
+ (((_v)&0xF0)?4+STATIC_ILOG2((_v)>>4):STATIC_ILOG2(_v))
+# define STATIC_ILOG4(_v) \
+ (((_v)&0xFF00)?8+STATIC_ILOG3((_v)>>8):STATIC_ILOG3(_v))
+# define STATIC_ILOG5(_v) \
+ (((_v)&0xFFFF0000)?16+STATIC_ILOG4((_v)>>16):STATIC_ILOG4(_v))
+# define STATIC_ILOG6(_v) \
+ (((_v)&0xFFFFFFFF00000000ULL)?32+STATIC_ILOG5((_v)>>32):STATIC_ILOG5(_v))
+
+#endif /* _ilog_H */
diff --git a/ccan/ccan/likely/LICENSE b/ccan/ccan/likely/LICENSE
new file mode 120000
index 0000000..b7951da
--- /dev/null
+++ b/ccan/ccan/likely/LICENSE
@@ -0,0 +1 @@
+../../licenses/CC0 \ No newline at end of file
diff --git a/ccan/ccan/likely/likely.c b/ccan/ccan/likely/likely.c
new file mode 100644
index 0000000..83e8d6f
--- /dev/null
+++ b/ccan/ccan/likely/likely.c
@@ -0,0 +1,136 @@
+/* CC0 (Public domain) - see LICENSE file for details. */
+#ifdef CCAN_LIKELY_DEBUG
+#include <ccan/likely/likely.h>
+#include <ccan/hash/hash.h>
+#include <ccan/htable/htable_type.h>
+#include <stdlib.h>
+#include <stdio.h>
+struct trace {
+ const char *condstr;
+ const char *file;
+ unsigned int line;
+ bool expect;
+ unsigned long count, right;
+};
+
+static size_t hash_trace(const struct trace *trace)
+{
+ return hash(trace->condstr, strlen(trace->condstr),
+ hash(trace->file, strlen(trace->file),
+ trace->line + trace->expect));
+}
+
+static bool trace_eq(const struct trace *t1, const struct trace *t2)
+{
+ return t1->condstr == t2->condstr
+ && t1->file == t2->file
+ && t1->line == t2->line
+ && t1->expect == t2->expect;
+}
+
+/* struct thash */
+HTABLE_DEFINE_TYPE(struct trace, (const struct trace *), hash_trace, trace_eq,
+ thash);
+
+static struct thash htable
+= { HTABLE_INITIALIZER(htable.raw, thash_hash, NULL) };
+
+static void init_trace(struct trace *trace,
+ const char *condstr, const char *file, unsigned int line,
+ bool expect)
+{
+ trace->condstr = condstr;
+ trace->file = file;
+ trace->line = line;
+ trace->expect = expect;
+ trace->count = trace->right = 0;
+}
+
+static struct trace *add_trace(const struct trace *t)
+{
+ struct trace *trace = malloc(sizeof(*trace));
+ *trace = *t;
+ thash_add(&htable, trace);
+ return trace;
+}
+
+long _likely_trace(bool cond, bool expect,
+ const char *condstr,
+ const char *file, unsigned int line)
+{
+ struct trace *p, trace;
+
+ init_trace(&trace, condstr, file, line, expect);
+ p = thash_get(&htable, &trace);
+ if (!p)
+ p = add_trace(&trace);
+
+ p->count++;
+ if (cond == expect)
+ p->right++;
+
+ return cond;
+}
+
+static double right_ratio(const struct trace *t)
+{
+ return (double)t->right / t->count;
+}
+
+char *likely_stats(unsigned int min_hits, unsigned int percent)
+{
+ struct trace *worst;
+ double worst_ratio;
+ struct thash_iter i;
+ char *ret;
+ struct trace *t;
+
+ worst = NULL;
+ worst_ratio = 2;
+
+ /* This is O(n), but it's not likely called that often. */
+ for (t = thash_first(&htable, &i); t; t = thash_next(&htable, &i)) {
+ if (t->count >= min_hits) {
+ if (right_ratio(t) < worst_ratio) {
+ worst = t;
+ worst_ratio = right_ratio(t);
+ }
+ }
+ }
+
+ if (worst_ratio * 100 > percent)
+ return NULL;
+
+ ret = malloc(strlen(worst->condstr) +
+ strlen(worst->file) +
+ sizeof(long int) * 8 +
+ sizeof("%s:%u:%slikely(%s) correct %u%% (%lu/%lu)"));
+ sprintf(ret, "%s:%u:%slikely(%s) correct %u%% (%lu/%lu)",
+ worst->file, worst->line,
+ worst->expect ? "" : "un", worst->condstr,
+ (unsigned)(worst_ratio * 100),
+ worst->right, worst->count);
+
+ thash_del(&htable, worst);
+ free(worst);
+
+ return ret;
+}
+
+void likely_stats_reset(void)
+{
+ struct thash_iter i;
+ struct trace *t;
+
+ /* This is a bit better than O(n^2), but we have to loop since
+ * first/next during delete is unreliable. */
+ while ((t = thash_first(&htable, &i)) != NULL) {
+ for (; t; t = thash_next(&htable, &i)) {
+ thash_del(&htable, t);
+ free(t);
+ }
+ }
+
+ thash_clear(&htable);
+}
+#endif /*CCAN_LIKELY_DEBUG*/
diff --git a/ccan/ccan/likely/likely.h b/ccan/ccan/likely/likely.h
new file mode 100644
index 0000000..a8f003d
--- /dev/null
+++ b/ccan/ccan/likely/likely.h
@@ -0,0 +1,111 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_LIKELY_H
+#define CCAN_LIKELY_H
+#include "config.h"
+#include <stdbool.h>
+
+#ifndef CCAN_LIKELY_DEBUG
+#if HAVE_BUILTIN_EXPECT
+/**
+ * likely - indicate that a condition is likely to be true.
+ * @cond: the condition
+ *
+ * This uses a compiler extension where available to indicate a likely
+ * code path and optimize appropriately; it's also useful for readers
+ * to quickly identify exceptional paths through functions. The
+ * threshold for "likely" is usually considered to be between 90 and
+ * 99%; marginal cases should not be marked either way.
+ *
+ * See Also:
+ * unlikely(), likely_stats()
+ *
+ * Example:
+ * // Returns false if we overflow.
+ * static inline bool inc_int(unsigned int *val)
+ * {
+ * (*val)++;
+ * if (likely(*val))
+ * return true;
+ * return false;
+ * }
+ */
+#define likely(cond) __builtin_expect(!!(cond), 1)
+
+/**
+ * unlikely - indicate that a condition is unlikely to be true.
+ * @cond: the condition
+ *
+ * This uses a compiler extension where available to indicate an unlikely
+ * code path and optimize appropriately; see likely() above.
+ *
+ * See Also:
+ * likely(), likely_stats(), COLD (compiler.h)
+ *
+ * Example:
+ * // Prints a warning if we overflow.
+ * static inline void inc_int(unsigned int *val)
+ * {
+ * (*val)++;
+ * if (unlikely(*val == 0))
+ * fprintf(stderr, "Overflow!");
+ * }
+ */
+#define unlikely(cond) __builtin_expect(!!(cond), 0)
+#else
+#define likely(cond) (!!(cond))
+#define unlikely(cond) (!!(cond))
+#endif
+#else /* CCAN_LIKELY_DEBUG versions */
+#include <ccan/str/str.h>
+
+#define likely(cond) \
+ (_likely_trace(!!(cond), 1, stringify(cond), __FILE__, __LINE__))
+#define unlikely(cond) \
+ (_likely_trace(!!(cond), 0, stringify(cond), __FILE__, __LINE__))
+
+long _likely_trace(bool cond, bool expect,
+ const char *condstr,
+ const char *file, unsigned int line);
+/**
+ * likely_stats - return description of abused likely()/unlikely()
+ * @min_hits: minimum number of hits
+ * @percent: maximum percentage correct
+ *
+ * When CCAN_LIKELY_DEBUG is defined, likely() and unlikely() trace their
+ * results: this causes a significant slowdown, but allows analysis of
+ * whether the branches are labelled correctly.
+ *
+ * This function returns a malloc'ed description of the least-correct
+ * usage of likely() or unlikely(). It ignores places which have been
+ * called less than @min_hits times, and those which were predicted
+ * correctly more than @percent of the time. It returns NULL when
+ * nothing meets those criteria.
+ *
+ * Note that this call is destructive; the returned offender is
+ * removed from the trace so that the next call to likely_stats() will
+ * return the next-worst likely()/unlikely() usage.
+ *
+ * Example:
+ * // Print every place hit more than twice which was wrong > 5%.
+ * static void report_stats(void)
+ * {
+ * #ifdef CCAN_LIKELY_DEBUG
+ *		char *bad;
+ *
+ * while ((bad = likely_stats(2, 95)) != NULL) {
+ * printf("Suspicious likely: %s", bad);
+ * free(bad);
+ * }
+ * #endif
+ * }
+ */
+char *likely_stats(unsigned int min_hits, unsigned int percent);
+
+/**
+ * likely_stats_reset - free up memory of likely()/unlikely() branches.
+ *
+ * This can also plug memory leaks.
+ */
+void likely_stats_reset(void);
+#endif /* CCAN_LIKELY_DEBUG */
+#endif /* CCAN_LIKELY_H */
diff --git a/ccan/ccan/list/_info b/ccan/ccan/list/_info
deleted file mode 100644
index c4f3e2a..0000000
--- a/ccan/ccan/list/_info
+++ /dev/null
@@ -1,72 +0,0 @@
-#include "config.h"
-#include <stdio.h>
-#include <string.h>
-
-/**
- * list - double linked list routines
- *
- * The list header contains routines for manipulating double linked lists.
- * It defines two types: struct list_head used for anchoring lists, and
- * struct list_node which is usually embedded in the structure which is placed
- * in the list.
- *
- * Example:
- * #include <err.h>
- * #include <stdio.h>
- * #include <stdlib.h>
- * #include <ccan/list/list.h>
- *
- * struct parent {
- * const char *name;
- * struct list_head children;
- * unsigned int num_children;
- * };
- *
- * struct child {
- * const char *name;
- * struct list_node list;
- * };
- *
- * int main(int argc, char *argv[])
- * {
- * struct parent p;
- * struct child *c;
- * int i;
- *
- * if (argc < 2)
- * errx(1, "Usage: %s parent children...", argv[0]);
- *
- * p.name = argv[1];
- * list_head_init(&p.children);
- * p.num_children = 0;
- * for (i = 2; i < argc; i++) {
- * c = malloc(sizeof(*c));
- * c->name = argv[i];
- * list_add(&p.children, &c->list);
- * p.num_children++;
- * }
- *
- * printf("%s has %u children:", p.name, p.num_children);
- * list_for_each(&p.children, c, list)
- * printf("%s ", c->name);
- * printf("\n");
- * return 0;
- * }
- *
- * License: BSD-MIT
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- return 1;
-
- if (strcmp(argv[1], "depends") == 0) {
- printf("ccan/str\n");
- printf("ccan/container_of\n");
- printf("ccan/check_type\n");
- return 0;
- }
-
- return 1;
-}
diff --git a/ccan/ccan/short_types/LICENSE b/ccan/ccan/short_types/LICENSE
new file mode 120000
index 0000000..b7951da
--- /dev/null
+++ b/ccan/ccan/short_types/LICENSE
@@ -0,0 +1 @@
+../../licenses/CC0 \ No newline at end of file
diff --git a/ccan/ccan/short_types/short_types.h b/ccan/ccan/short_types/short_types.h
new file mode 100644
index 0000000..175377e
--- /dev/null
+++ b/ccan/ccan/short_types/short_types.h
@@ -0,0 +1,35 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_SHORT_TYPES_H
+#define CCAN_SHORT_TYPES_H
+#include <stdint.h>
+
+/**
+ * u64/s64/u32/s32/u16/s16/u8/s8 - short names for explicitly-sized types.
+ */
+typedef uint64_t u64;
+typedef int64_t s64;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint8_t u8;
+typedef int8_t s8;
+
+/* If ccan/endian is included before this header, it also gets short names. */
+#ifdef CCAN_ENDIAN_H
+/**
+ * be64/be32/be16 - 64/32/16 bit big-endian representation.
+ */
+typedef beint64_t be64;
+typedef beint32_t be32;
+typedef beint16_t be16;
+
+/**
+ * le64/le32/le16 - 64/32/16 bit little-endian representation.
+ */
+typedef leint64_t le64;
+typedef leint32_t le32;
+typedef leint16_t le16;
+#endif
+
+#endif /* CCAN_SHORT_TYPES_H */
diff --git a/ccan/ccan/str/_info b/ccan/ccan/str/_info
deleted file mode 100644
index b579525..0000000
--- a/ccan/ccan/str/_info
+++ /dev/null
@@ -1,52 +0,0 @@
-#include "config.h"
-#include <stdio.h>
-#include <string.h>
-
-/**
- * str - string helper routines
- *
- * This is a grab bag of functions for string operations, designed to enhance
- * the standard string.h.
- *
- * Note that if you define CCAN_STR_DEBUG, you will get extra compile
- * checks on common misuses of the following functions (they will now
- * be out-of-line, so there is a runtime penalty!).
- *
- * strstr, strchr, strrchr:
- * Return const char * if first argument is const (gcc only).
- *
- * isalnum, isalpha, isascii, isblank, iscntrl, isdigit, isgraph,
- * islower, isprint, ispunct, isspace, isupper, isxdigit:
- * Static and runtime check that input is EOF or an *unsigned*
- * char, as per C standard (really!).
- *
- * Example:
- * #include <stdio.h>
- * #include <ccan/str/str.h>
- *
- * int main(int argc, char *argv[])
- * {
- * if (argc > 1 && streq(argv[1], "--verbose"))
- * printf("verbose set\n");
- * if (argc > 1 && strstarts(argv[1], "--"))
- * printf("Some option set\n");
- * if (argc > 1 && strends(argv[1], "cow-powers"))
- * printf("Magic option set\n");
- * return 0;
- * }
- *
- * License: CC0 (Public domain)
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- return 1;
-
- if (strcmp(argv[1], "depends") == 0) {
- printf("ccan/build_assert\n");
- return 0;
- }
-
- return 1;
-}
diff --git a/ccan/ccan/strset/strset.c b/ccan/ccan/strset/strset.c
new file mode 100644
index 0000000..06b0d7a
--- /dev/null
+++ b/ccan/ccan/strset/strset.c
@@ -0,0 +1,309 @@
+/* This code is based on the public domain code at
+ * http://github.com/agl/critbit written by Adam Langley
+ * <agl@imperialviolet.org>.
+ *
+ * Here are the main implementation differences:
+ * (1) We don't strdup the string on insert; we use the pointer we're given.
+ * (2) We use a straight bit number rather than a mask; it's simpler.
+ * (3) We don't use the bottom bit of the pointer, but instead use a leading
+ * zero to distinguish nodes from strings.
+ * (4) The empty string (which would look like a node) is handled
+ * using a special "empty node".
+ * (5) Delete returns the string, so you can free it if you want to.
+ * (6) Unions instead of void *, bool instead of int.
+ */
+#include <ccan/strset/strset.h>
+#include <ccan/short_types/short_types.h>
+#include <ccan/likely/likely.h>
+#include <ccan/str/str.h>
+#include <ccan/ilog/ilog.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <errno.h>
+
+struct node {
+ /* To differentiate us from strings. */
+ char nul_byte;
+ /* The bit where these children differ. */
+ u8 bit_num;
+ /* The byte number where first bit differs (-1 == empty string node). */
+ size_t byte_num;
+ /* These point to strings or nodes. */
+ struct strset child[2];
+};
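+/* Editor's note, a worked example of the fields above: after adding "hat" and
+ * "hit", the set holds a single node with byte_num == 1 and bit_num == 3
+ * (since 'a' ^ 'i' == 0x08), child[0] == "hat" (bit 3 of 'a' is 0) and
+ * child[1] == "hit" (bit 3 of 'i' is 1). */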
+
+/* Closest member to this in a non-empty set. */
+static const char *closest(struct strset n, const char *member)
+{
+ size_t len = strlen(member);
+ const u8 *bytes = (const u8 *)member;
+
+ /* Anything with first byte 0 is a node. */
+ while (!n.u.s[0]) {
+ u8 direction = 0;
+
+ /* Special node which represents the empty string. */
+ if (unlikely(n.u.n->byte_num == (size_t)-1)) {
+ n = n.u.n->child[0];
+ break;
+ }
+
+ if (n.u.n->byte_num < len) {
+ u8 c = bytes[n.u.n->byte_num];
+ direction = (c >> n.u.n->bit_num) & 1;
+ }
+ n = n.u.n->child[direction];
+ }
+ return n.u.s;
+}
+
+char *strset_get(const struct strset *set, const char *member)
+{
+ const char *str;
+
+ /* Non-empty set? */
+ if (set->u.n) {
+ str = closest(*set, member);
+ if (streq(member, str))
+ return (char *)str;
+ }
+ errno = ENOENT;
+ return NULL;
+}
+
+static bool set_string(struct strset *set,
+ struct strset *n, const char *member)
+{
+ /* Substitute magic empty node if this is the empty string */
+ if (unlikely(!member[0])) {
+ n->u.n = malloc(sizeof(*n->u.n));
+ if (unlikely(!n->u.n)) {
+ errno = ENOMEM;
+ return false;
+ }
+ n->u.n->nul_byte = '\0';
+ n->u.n->byte_num = (size_t)-1;
+ /* Attach the string to child[0] */
+ n = &n->u.n->child[0];
+ }
+ n->u.s = member;
+ return true;
+}
+
+bool strset_add(struct strset *set, const char *member)
+{
+ size_t len = strlen(member);
+ const u8 *bytes = (const u8 *)member;
+ struct strset *np;
+ const char *str;
+ struct node *newn;
+ size_t byte_num;
+ u8 bit_num, new_dir;
+
+ /* Empty set? */
+ if (!set->u.n) {
+ return set_string(set, set, member);
+ }
+
+ /* Find closest existing member. */
+ str = closest(*set, member);
+
+ /* Find where they differ. */
+ for (byte_num = 0; str[byte_num] == member[byte_num]; byte_num++) {
+ if (member[byte_num] == '\0') {
+ /* All identical! */
+ errno = EEXIST;
+ return false;
+ }
+ }
+
+ /* Find which bit differs (if we had ilog8, we'd use it) */
+ bit_num = ilog32_nz((u8)str[byte_num] ^ bytes[byte_num]) - 1;
+ assert(bit_num < CHAR_BIT);
+
+ /* Which direction do we go at this bit? */
+ new_dir = ((bytes[byte_num]) >> bit_num) & 1;
+
+ /* Allocate new node. */
+ newn = malloc(sizeof(*newn));
+ if (!newn) {
+ errno = ENOMEM;
+ return false;
+ }
+ newn->nul_byte = '\0';
+ newn->byte_num = byte_num;
+ newn->bit_num = bit_num;
+ if (unlikely(!set_string(set, &newn->child[new_dir], member))) {
+ free(newn);
+ return false;
+ }
+
+ /* Find where to insert: not closest, but first which differs! */
+ np = set;
+ while (!np->u.s[0]) {
+ u8 direction = 0;
+
+ /* Special node which represents the empty string will
+ * break here too! */
+ if (np->u.n->byte_num > byte_num)
+ break;
+ /* Subtle: bit numbers are "backwards" for comparison */
+ if (np->u.n->byte_num == byte_num && np->u.n->bit_num < bit_num)
+ break;
+
+ if (np->u.n->byte_num < len) {
+ u8 c = bytes[np->u.n->byte_num];
+ direction = (c >> np->u.n->bit_num) & 1;
+ }
+ np = &np->u.n->child[direction];
+ }
+
+ newn->child[!new_dir]= *np;
+ np->u.n = newn;
+ return true;
+}
+
+char *strset_del(struct strset *set, const char *member)
+{
+ size_t len = strlen(member);
+ const u8 *bytes = (const u8 *)member;
+ struct strset *parent = NULL, *n;
+ const char *ret = NULL;
+ u8 direction = 0; /* prevent bogus gcc warning. */
+
+ /* Empty set? */
+ if (!set->u.n) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ /* Find closest, but keep track of parent. */
+ n = set;
+ /* Anything with first byte 0 is a node. */
+ while (!n->u.s[0]) {
+ u8 c = 0;
+
+ /* Special node which represents the empty string. */
+ if (unlikely(n->u.n->byte_num == (size_t)-1)) {
+ const char *empty_str = n->u.n->child[0].u.s;
+
+ if (member[0]) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ /* Sew empty string back so remaining logic works */
+ free(n->u.n);
+ n->u.s = empty_str;
+ break;
+ }
+
+ parent = n;
+ if (n->u.n->byte_num < len) {
+ c = bytes[n->u.n->byte_num];
+ direction = (c >> n->u.n->bit_num) & 1;
+ } else
+ direction = 0;
+ n = &n->u.n->child[direction];
+ }
+
+ /* Did we find it? */
+ if (!streq(member, n->u.s)) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ ret = n->u.s;
+
+ if (!parent) {
+ /* We deleted last node. */
+ set->u.n = NULL;
+ } else {
+ struct node *old = parent->u.n;
+ /* Raise other node to parent. */
+ *parent = old->child[!direction];
+ free(old);
+ }
+
+ return (char *)ret;
+}
+
+static bool iterate(struct strset n,
+ bool (*handle)(const char *, void *), const void *data)
+{
+ if (n.u.s[0])
+ return handle(n.u.s, (void *)data);
+ if (unlikely(n.u.n->byte_num == (size_t)-1))
+ return handle(n.u.n->child[0].u.s, (void *)data);
+
+ return iterate(n.u.n->child[0], handle, data)
+ && iterate(n.u.n->child[1], handle, data);
+}
+
+void strset_iterate_(const struct strset *set,
+ bool (*handle)(const char *, void *), const void *data)
+{
+ /* Empty set? */
+ if (!set->u.n)
+ return;
+
+ iterate(*set, handle, data);
+}
+
+const struct strset *strset_prefix(const struct strset *set, const char *prefix)
+{
+ const struct strset *n, *top;
+ size_t len = strlen(prefix);
+ const u8 *bytes = (const u8 *)prefix;
+
+ /* Empty set -> return empty set. */
+ if (!set->u.n)
+ return set;
+
+ top = n = set;
+
+ /* We walk to find the top, but keep going to check prefix matches. */
+ while (!n->u.s[0]) {
+ u8 c = 0, direction;
+
+ /* Special node which represents the empty string. */
+ if (unlikely(n->u.n->byte_num == (size_t)-1)) {
+ n = &n->u.n->child[0];
+ break;
+ }
+
+ if (n->u.n->byte_num < len)
+ c = bytes[n->u.n->byte_num];
+
+ direction = (c >> n->u.n->bit_num) & 1;
+ n = &n->u.n->child[direction];
+ if (c)
+ top = n;
+ }
+
+ if (!strstarts(n->u.s, prefix)) {
+ /* Convenient return for prefixes which do not appear in set. */
+ static const struct strset empty_set;
+ return &empty_set;
+ }
+
+ return top;
+}
+
+static void clear(struct strset n)
+{
+ if (!n.u.s[0]) {
+ if (likely(n.u.n->byte_num != (size_t)-1)) {
+ clear(n.u.n->child[0]);
+ clear(n.u.n->child[1]);
+ }
+ free(n.u.n);
+ }
+}
+
+void strset_clear(struct strset *set)
+{
+ if (set->u.n)
+ clear(*set);
+ set->u.n = NULL;
+}
diff --git a/ccan/ccan/strset/strset.h b/ccan/ccan/strset/strset.h
new file mode 100644
index 0000000..9d6f1ae
--- /dev/null
+++ b/ccan/ccan/strset/strset.h
@@ -0,0 +1,167 @@
+#ifndef CCAN_STRSET_H
+#define CCAN_STRSET_H
+#include "config.h"
+#include <ccan/typesafe_cb/typesafe_cb.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+/**
+ * struct strset - representation of a string set
+ *
+ * It's exposed here to allow you to embed it and so we can inline the
+ * trivial functions.
+ */
+struct strset {
+ union {
+ struct node *n;
+ const char *s;
+ } u;
+};
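+/* Editor's note: the first byte is the discriminant.  An internal node always
+ * starts with a NUL byte (see struct node in strset.c), while a member string
+ * never does; the empty string gets a special node, so u.s[0] == '\0' means
+ * "node". */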
+
+/**
+ * strset_init - initialize a string set (empty)
+ *
+ * For completeness; if you've arranged for it to be NULL already you don't
+ * need this.
+ *
+ * Example:
+ * struct strset set;
+ *
+ * strset_init(&set);
+ */
+static inline void strset_init(struct strset *set)
+{
+ set->u.n = NULL;
+}
+
+/**
+ * strset_empty - is this string set empty?
+ * @set: the set.
+ *
+ * Example:
+ * if (!strset_empty(&set))
+ * abort();
+ */
+static inline bool strset_empty(const struct strset *set)
+{
+ return set->u.n == NULL;
+}
+
+/**
+ * strset_get - is this a member of this string set?
+ * @set: the set.
+ * @member: the string to search for.
+ *
+ * Returns the member, or NULL if it isn't in the set (and sets errno
+ * = ENOENT).
+ *
+ * Example:
+ * if (strset_get(&set, "hello"))
+ * printf("hello is in the set\n");
+ */
+char *strset_get(const struct strset *set, const char *member);
+
+/**
+ * strset_add - place a member in the string set.
+ * @set: the set.
+ * @member: the string to place in the set.
+ *
+ * This returns false if we run out of memory (errno = ENOMEM), or
+ * (more normally) if that string already appears in the set (EEXIST).
+ *
+ * Note that the pointer is placed in the set; the string is not copied. If
+ * you want a copy in the set, use strdup().
+ *
+ * Example:
+ * if (!strset_add(&set, "goodbye"))
+ * printf("goodbye was already in the set\n");
+ */
+bool strset_add(struct strset *set, const char *member);
+
+/**
+ * strset_del - remove a member from the string set.
+ * @set: the set.
+ * @member: the string to remove from the set.
+ *
+ * This returns the string which was passed to strset_add(), or NULL if
+ * the string was not in the set (in which case it sets errno = ENOENT).
+ *
+ * This means that if you allocated a string (eg. using strdup()), you can
+ * free it here.
+ *
+ * Example:
+ * if (!strset_del(&set, "goodbye"))
+ * printf("goodbye was not in the set?\n");
+ */
+char *strset_del(struct strset *set, const char *member);
+
+/**
+ * strset_clear - remove every member from the set.
+ * @set: the set.
+ *
+ * The set will be empty after this.
+ *
+ * Example:
+ * strset_clear(&set);
+ */
+void strset_clear(struct strset *set);
+
+/**
+ * strset_iterate - ordered iteration over a set
+ * @set: the set.
+ * @handle: the function to call.
+ * @arg: the argument for the function (types should match).
+ *
+ * You should not alter the set within the @handle function! If it returns
+ * false, the iteration will stop.
+ *
+ * Example:
+ * static bool dump_some(const char *member, int *num)
+ * {
+ *		// Only dump out *num nodes.
+ *		if ((*num)-- == 0)
+ * return false;
+ * printf("%s\n", member);
+ * return true;
+ * }
+ *
+ * static void dump_set(const struct strset *set)
+ * {
+ * int max = 100;
+ * strset_iterate(set, dump_some, &max);
+ * if (max < 0)
+ * printf("... (truncated to 100 entries)\n");
+ * }
+ */
+#define strset_iterate(set, handle, arg) \
+ strset_iterate_((set), typesafe_cb_preargs(bool, void *, \
+ (handle), (arg), \
+ const char *), \
+ (arg))
+void strset_iterate_(const struct strset *set,
+ bool (*handle)(const char *, void *), const void *data);
+
+
+/**
+ * strset_prefix - return a subset matching a prefix
+ * @set: the set.
+ * @prefix: the prefix.
+ *
+ * This returns a pointer into @set, so don't alter @set while using
+ * the return value. You can use strset_iterate(), strset_get() or
+ * strset_empty() on the returned pointer.
+ *
+ * Example:
+ * static void dump_prefix(const struct strset *set, const char *prefix)
+ * {
+ * int max = 100;
+ * printf("Nodes with prefix %s:\n", prefix);
+ * strset_iterate(strset_prefix(set, prefix), dump_some, &max);
+ * if (max < 0)
+ * printf("... (truncated to 100 entries)\n");
+ * }
+ */
+const struct strset *strset_prefix(const struct strset *set,
+ const char *prefix);
+
+#endif /* CCAN_STRSET_H */
diff --git a/ccan/ccan/typesafe_cb/LICENSE b/ccan/ccan/typesafe_cb/LICENSE
new file mode 120000
index 0000000..b7951da
--- /dev/null
+++ b/ccan/ccan/typesafe_cb/LICENSE
@@ -0,0 +1 @@
+../../licenses/CC0
\ No newline at end of file
diff --git a/ccan/ccan/typesafe_cb/typesafe_cb.h b/ccan/ccan/typesafe_cb/typesafe_cb.h
new file mode 100644
index 0000000..126d325
--- /dev/null
+++ b/ccan/ccan/typesafe_cb/typesafe_cb.h
@@ -0,0 +1,134 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_TYPESAFE_CB_H
+#define CCAN_TYPESAFE_CB_H
+#include "config.h"
+
+#if HAVE_TYPEOF && HAVE_BUILTIN_CHOOSE_EXPR && HAVE_BUILTIN_TYPES_COMPATIBLE_P
+/**
+ * typesafe_cb_cast - only cast an expression if it matches a given type
+ * @desttype: the type to cast to
+ * @oktype: the type we allow
+ * @expr: the expression to cast
+ *
+ * This macro is used to create functions which allow multiple types.
+ * The result of this macro is used somewhere that a @desttype type is
+ * expected: if @expr is exactly of type @oktype, then it will be
+ * cast to @desttype type, otherwise left alone.
+ *
+ * This macro can be used in static initializers.
+ *
+ * This is merely useful for warnings: if the compiler does not
+ * support the primitives required for typesafe_cb_cast(), it becomes an
+ * unconditional cast, and the @oktype argument is not used. In
+ * particular, this means that @oktype can be a type which uses the
+ * "typeof": it will not be evaluated if typeof is not supported.
+ *
+ * Example:
+ * // We can take either an unsigned long or a void *.
+ * void _set_some_value(void *val);
+ * #define set_some_value(e) \
+ * _set_some_value(typesafe_cb_cast(void *, unsigned long, (e)))
+ */
+#define typesafe_cb_cast(desttype, oktype, expr) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(__typeof__(0?(expr):(expr)), \
+ oktype), \
+ (desttype)(expr), (expr))
+#else
+#define typesafe_cb_cast(desttype, oktype, expr) ((desttype)(expr))
+#endif
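+
+/*
+ * Sketch of the effect, using the set_some_value() example above: passing an
+ * unsigned long (eg. set_some_value(42UL)) is cast to void * and compiles
+ * cleanly, whereas passing a plain int leaves the expression uncast, so the
+ * compiler warns about handing an integer to _set_some_value()'s pointer
+ * parameter.
+ */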
+
+/**
+ * typesafe_cb_cast3 - only cast an expression if it matches given types
+ * @desttype: the type to cast to
+ * @ok1: the first type we allow
+ * @ok2: the second type we allow
+ * @ok3: the third type we allow
+ * @expr: the expression to cast
+ *
+ * This is a convenient wrapper for multiple typesafe_cb_cast() calls.
+ * You can chain them inside each other (ie. use typesafe_cb_cast()
+ * for expr) if you need more than 3 arguments.
+ *
+ * Example:
+ * // We can take either a long, unsigned long, void * or a const void *.
+ * void _set_some_value(void *val);
+ * #define set_some_value(expr) \
+ *		_set_some_value(typesafe_cb_cast3(void *,		\
+ *					long, unsigned long, const void *, \
+ * (expr)))
+ */
+#define typesafe_cb_cast3(desttype, ok1, ok2, ok3, expr) \
+ typesafe_cb_cast(desttype, ok1, \
+ typesafe_cb_cast(desttype, ok2, \
+ typesafe_cb_cast(desttype, ok3, \
+ (expr))))
+
+/**
+ * typesafe_cb - cast a callback function if it matches the arg
+ * @rtype: the return type of the callback function
+ * @atype: the (pointer) type which the callback function expects.
+ * @fn: the callback function to cast
+ * @arg: the (pointer) argument to hand to the callback function.
+ *
+ * If a callback function takes a single argument whose type matches that of
+ * @arg, this macro casts it to a function which takes a single @atype
+ * argument; otherwise the callback is left uncast.
+ *
+ * It is assumed that @arg is of pointer type: usually @arg is passed
+ * or assigned to a void * elsewhere anyway.
+ *
+ * Example:
+ * void _register_callback(void (*fn)(void *arg), void *arg);
+ * #define register_callback(fn, arg) \
+ *		_register_callback(typesafe_cb(void, void *, (fn), (arg)), (arg))
+ */
+#define typesafe_cb(rtype, atype, fn, arg) \
+ typesafe_cb_cast(rtype (*)(atype), \
+ rtype (*)(__typeof__(arg)), \
+ (fn))
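+
+/*
+ * For instance, with the register_callback() wrapper above and illustrative
+ * types: a callback declared as void my_cb(struct foo *) is cast to
+ * void (*)(void *) only when arg is a struct foo *; any other arg type
+ * leaves the function pointer uncast, and _register_callback() provokes an
+ * incompatible-pointer-type warning.
+ */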
+
+/**
+ * typesafe_cb_preargs - cast a callback function if it matches the arg
+ * @rtype: the return type of the callback function
+ * @atype: the (pointer) type which the callback function expects.
+ * @fn: the callback function to cast
+ * @arg: the (pointer) argument to hand to the callback function.
+ *
+ * This is a version of typesafe_cb() for callbacks that take other arguments
+ * before the @arg.
+ *
+ * Example:
+ * void _register_callback(void (*fn)(int, void *arg), void *arg);
+ * #define register_callback(fn, arg) \
+ * _register_callback(typesafe_cb_preargs(void, void *, \
+ * (fn), (arg), int), \
+ * (arg))
+ */
+#define typesafe_cb_preargs(rtype, atype, fn, arg, ...) \
+ typesafe_cb_cast(rtype (*)(__VA_ARGS__, atype), \
+ rtype (*)(__VA_ARGS__, __typeof__(arg)), \
+ (fn))
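+
+/*
+ * Concretely (illustrative types): typesafe_cb_preargs(void, void *, (fn),
+ * (arg), int) casts @fn to void (*)(int, void *) only when @fn has type
+ * void (*)(int, typeof(arg)); otherwise @fn is left alone and the compiler
+ * warns when _register_callback() above receives it.
+ */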
+
+/**
+ * typesafe_cb_postargs - cast a callback function if it matches the arg
+ * @rtype: the return type of the callback function
+ * @atype: the (pointer) type which the callback function expects.
+ * @fn: the callback function to cast
+ * @arg: the (pointer) argument to hand to the callback function.
+ *
+ * This is a version of typesafe_cb() for callbacks that take other arguments
+ * after the @arg.
+ *
+ * Example:
+ * void _register_callback(void (*fn)(void *arg, int), void *arg);
+ * #define register_callback(fn, arg) \
+ *		_register_callback(typesafe_cb_postargs(void, void *, (fn), \
+ * (arg), int), \
+ * (arg))
+ */
+#define typesafe_cb_postargs(rtype, atype, fn, arg, ...) \
+ typesafe_cb_cast(rtype (*)(atype, __VA_ARGS__), \
+ rtype (*)(__typeof__(arg), __VA_ARGS__), \
+ (fn))
+#endif /* CCAN_TYPESAFE_CB_H */