path: root/include/import
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:18:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:18:05 +0000
commit    b46aad6df449445a9fc4aa7b32bd40005438e3f7 (patch)
tree      751aa858ca01f35de800164516b298887382919d /include/import
parent    Initial commit. (diff)
Adding upstream version 2.9.5. (upstream/2.9.5)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/import')
-rw-r--r--  include/import/atomic-ops.h    1991
-rw-r--r--  include/import/eb32sctree.h     121
-rw-r--r--  include/import/eb32tree.h       482
-rw-r--r--  include/import/eb64tree.h       575
-rw-r--r--  include/import/ebimtree.h       324
-rw-r--r--  include/import/ebistree.h       329
-rw-r--r--  include/import/ebmbtree.h       850
-rw-r--r--  include/import/ebpttree.h       156
-rw-r--r--  include/import/ebsttree.h       324
-rw-r--r--  include/import/ebtree-t.h       217
-rw-r--r--  include/import/ebtree.h         857
-rw-r--r--  include/import/ist.h            957
-rw-r--r--  include/import/lru.h             75
-rw-r--r--  include/import/mjson.h          209
-rw-r--r--  include/import/plock.h         1422
-rw-r--r--  include/import/sha1.h            35
-rw-r--r--  include/import/slz-tables.h     257
-rw-r--r--  include/import/slz.h            200
-rw-r--r--  include/import/xxhash.h        6773
19 files changed, 16154 insertions, 0 deletions
diff --git a/include/import/atomic-ops.h b/include/import/atomic-ops.h
new file mode 100644
index 0000000..29674db
--- /dev/null
+++ b/include/import/atomic-ops.h
@@ -0,0 +1,1991 @@
+/* generic atomic operations used by progressive locks
+ *
+ * Copyright (C) 2012-2022 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PL_ATOMIC_OPS_H
+#define PL_ATOMIC_OPS_H
+
+/* The definitions below exist in two forms:
+ * - fallback form (_pl_*)
+ * - preferred form (pl_*)
+ *
+ * As a general rule, given that C11 atomics tend to offer more flexibility to
+ * the compiler, the C11 code should define the preferred form, and the
+ * arch-specific code should define the fallback form. But it's possible for
+ * arch-specific code to define a preferred form, in which case it is simply
+ * used instead of the others.
+ */
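
For illustration, here is a minimal sketch of how callers are expected to use the preferred pl_* forms (the header resolves them to either the C11 or the arch-specific implementation). The counter and helper names below are hypothetical and not part of the patch:

    #include "atomic-ops.h"

    static unsigned int refs;                /* hypothetical shared counter */

    static inline void ref_get(void)
    {
            pl_inc_noret(&refs);             /* atomic increment, result unused */
    }

    static inline int ref_put(void)
    {
            /* pl_dec() returns non-zero while the result is non-zero, so this
             * is true exactly when the last reference was just dropped.
             */
            return !pl_dec(&refs);
    }
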
+
+/*
+ * Architecture-specific versions of the various operations
+ */
+
+/*
+ * ###### ix86 / x86_64 below ######
+ */
+#if defined(__i386__) || defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
+
+/* for compilers supporting condition flags on output, let's directly return them */
+#if defined(__GCC_ASM_FLAG_OUTPUTS__)
+#define X86_COND_C_TO_REG(reg) ""
+#define X86_COND_Z_TO_REG(reg) ""
+#define X86_COND_NZ_TO_REG(reg) ""
+#define X86_COND_C_RESULT(var) "=@ccc"(var)
+#define X86_COND_Z_RESULT(var) "=@ccz"(var)
+#define X86_COND_NZ_RESULT(var) "=@ccnz"(var)
+#else
+#define X86_COND_C_TO_REG(reg) "sbb %" #reg ", %" #reg "\n\t"
+#define X86_COND_Z_TO_REG(reg) "sete %" #reg "\n\t"
+#define X86_COND_NZ_TO_REG(reg) "setne %" #reg "\n\t"
+#define X86_COND_C_RESULT(var) "=r"(var)
+#define X86_COND_Z_RESULT(var) "=qm"(var)
+#define X86_COND_NZ_RESULT(var) "=qm"(var)
+#endif
+
+/* CPU relaxation while waiting (PAUSE instruction on x86) */
+#define pl_cpu_relax() do { \
+ asm volatile("rep;nop\n"); \
+ } while (0)
+
+/* full memory barrier using mfence when SSE2 is supported, falling back to
+ * "lock add %esp" (gcc uses "lock add" or "lock or").
+ */
+#if defined(__SSE2__)
+
+#define _pl_mb() do { \
+ asm volatile("mfence" ::: "memory"); \
+ } while (0)
+
+#elif defined(__x86_64__)
+
+#define _pl_mb() do { \
+ asm volatile("lock addl $0,0 (%%rsp)" ::: "memory", "cc"); \
+ } while (0)
+
+#else /* ix86 */
+
+#define _pl_mb() do { \
+ asm volatile("lock addl $0,0 (%%esp)" ::: "memory", "cc"); \
+ } while (0)
+
+#endif /* end of pl_mb() case for sse2/x86_64/x86 */
+
+/* load/store barriers are nops on x86 */
+#define _pl_mb_load() do { asm volatile("" ::: "memory"); } while (0)
+#define _pl_mb_store() do { asm volatile("" ::: "memory"); } while (0)
+
+/* atomic full/load/store are also nops on x86 */
+#define _pl_mb_ato() do { asm volatile("" ::: "memory"); } while (0)
+#define _pl_mb_ato_load() do { asm volatile("" ::: "memory"); } while (0)
+#define _pl_mb_ato_store() do { asm volatile("" ::: "memory"); } while (0)
+
+/* atomic load: on x86 it's just a volatile read */
+#define _pl_load_lax(ptr) _pl_load(ptr)
+#define _pl_load(ptr) ({ typeof(*(ptr)) __ptr = *(volatile typeof(ptr))ptr; __ptr; })
+
+/* atomic store: on x86 it's just a volatile write */
+#define _pl_store_lax(ptr, x) _pl_store(ptr, x)
+#define _pl_store(ptr, x) do { *((volatile typeof(ptr))(ptr)) = (typeof(*ptr))(x); } while (0)
+
+/* increment integer value pointed to by pointer <ptr>, and return non-zero if
+ * result is non-null.
+ */
+#define _pl_inc_lax(ptr) _pl_inc(ptr)
+#define _pl_inc_acq(ptr) _pl_inc(ptr)
+#define _pl_inc_rel(ptr) _pl_inc(ptr)
+#define _pl_inc(ptr) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incq %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incl %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incw %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incb %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_inc__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_inc__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
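
A quick aside on the pattern used by _pl_inc() and the macros that follow: they combine GCC statement expressions (to return a value from a macro) with compile-time sizeof() dispatch, and the unsupported-size branch calls a function that is declared but never defined, so that a misuse turns into a link-time error instead of silent miscompilation. A reduced sketch of the same idea, with illustrative names such as my_atomic_inc and __bad_size (not from the patch):

    #define my_atomic_inc(ptr) do {                                          \
            if (sizeof(long) == 8 && sizeof(*(ptr)) == 8)                    \
                    asm volatile("lock incq %0" : "+m" (*(ptr)) : : "cc");   \
            else if (sizeof(*(ptr)) == 4)                                    \
                    asm volatile("lock incl %0" : "+m" (*(ptr)) : : "cc");   \
            else {                                                           \
                    void __bad_size(char *, int); /* declared, never defined */ \
                    if (sizeof(*(ptr)) != 4 &&                               \
                        (sizeof(long) != 8 || sizeof(*(ptr)) != 8))          \
                            __bad_size(__FILE__, __LINE__); /* link error */ \
            }                                                                \
    } while (0)

    static inline void demo(unsigned int *p)
    {
            my_atomic_inc(p);   /* only the "lock incl" branch survives here */
    }
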
+
+/* decrement integer value pointed to by pointer <ptr>, and return non-zero if
+ * result is non-null.
+ */
+#define _pl_dec_lax(ptr) _pl_dec(ptr)
+#define _pl_dec_acq(ptr) _pl_dec(ptr)
+#define _pl_dec_rel(ptr) _pl_dec(ptr)
+#define _pl_dec(ptr) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decq %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decl %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decw %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decb %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_dec__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_dec__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* increment integer value pointed to by pointer <ptr>, no return */
+#define pl_inc_noret_lax(ptr) pl_inc_noret(ptr)
+#define pl_inc_noret_acq(ptr) pl_inc_noret(ptr)
+#define pl_inc_noret_rel(ptr) pl_inc_noret(ptr)
+#define pl_inc_noret(ptr) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock incq %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock incl %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock incw %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock incb %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_inc_noret__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_inc_noret__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* decrement integer value pointed to by pointer <ptr>, no return */
+#define pl_dec_noret_lax(ptr) pl_dec_noret(ptr)
+#define pl_dec_noret_acq(ptr) pl_dec_noret(ptr)
+#define pl_dec_noret_rel(ptr) pl_dec_noret(ptr)
+#define pl_dec_noret(ptr) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock decq %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock decl %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock decw %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock decb %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_dec_noret__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_dec_noret__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* add integer constant <x> to integer value pointed to by pointer <ptr>,
+ * no return. Size of <x> is not checked.
+ */
+#define _pl_add_noret_lax(ptr, x) _pl_add_noret(ptr, x)
+#define _pl_add_noret_acq(ptr, x) _pl_add_noret(ptr, x)
+#define _pl_add_noret_rel(ptr, x) _pl_add_noret(ptr, x)
+#define _pl_add_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock addq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock addl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock addw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock addb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_add__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_add__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* subtract integer constant <x> from integer value pointed to by pointer
+ * <ptr>, no return. Size of <x> is not checked.
+ */
+#define _pl_sub_noret_lax(ptr, x) _pl_sub_noret(ptr, x)
+#define _pl_sub_noret_acq(ptr, x) _pl_sub_noret(ptr, x)
+#define _pl_sub_noret_rel(ptr, x) _pl_sub_noret(ptr, x)
+#define _pl_sub_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock subq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock subl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock subw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock subb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_sub__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_sub__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* binary and integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#define _pl_and_noret_lax(ptr, x) _pl_and_noret(ptr, x)
+#define _pl_and_noret_acq(ptr, x) _pl_and_noret(ptr, x)
+#define _pl_and_noret_rel(ptr, x) _pl_and_noret(ptr, x)
+#define _pl_and_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock andq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock andl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock andw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock andb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_and__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_and__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* binary or integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#define _pl_or_noret_lax(ptr, x) _pl_or_noret(ptr, x)
+#define _pl_or_noret_acq(ptr, x) _pl_or_noret(ptr, x)
+#define _pl_or_noret_rel(ptr, x) _pl_or_noret(ptr, x)
+#define _pl_or_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock orq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock orl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock orw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock orb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_or__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_or__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* binary xor integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#define _pl_xor_noret_lax(ptr, x) _pl_xor_noret(ptr, x)
+#define _pl_xor_noret_acq(ptr, x) _pl_xor_noret(ptr, x)
+#define _pl_xor_noret_rel(ptr, x) _pl_xor_noret(ptr, x)
+#define _pl_xor_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock xorq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock xorl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock xorw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock xorb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_xor__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_xor__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* test and reset bit <bit> in integer value pointed to by pointer <ptr>. Returns
+ * 0 if the bit was not set, or a non-zero value if it was set. Note
+ * that there is no 8-bit equivalent operation.
+ */
+#define pl_btr_lax(ptr, bit) pl_btr(ptr, bit)
+#define pl_btr_acq(ptr, bit) pl_btr(ptr, bit)
+#define pl_btr_rel(ptr, bit) pl_btr(ptr, bit)
+#define pl_btr(ptr, bit) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret; \
+ asm volatile("lock btrq %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret; \
+ asm volatile("lock btrl %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret; \
+ asm volatile("lock btrw %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_btr__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_btr__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* test and set bit <bit> in integer value pointed to by pointer <ptr>. Returns
+ * 0 if the bit was not set, or a non-zero value if it was set. Note
+ * that there is no 8-bit equivalent operation.
+ */
+#define pl_bts_lax(ptr, bit) pl_bts(ptr, bit)
+#define pl_bts_acq(ptr, bit) pl_bts(ptr, bit)
+#define pl_bts_rel(ptr, bit) pl_bts(ptr, bit)
+#define pl_bts(ptr, bit) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret; \
+ asm volatile("lock btsq %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret; \
+ asm volatile("lock btsl %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret; \
+ asm volatile("lock btsw %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_bts__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_bts__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
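
Together with pl_cpu_relax(), pl_bts()/pl_btr() are enough to build a rudimentary test-and-set spinlock, which is the kind of primitive the progressive locks in plock.h build upon. A minimal sketch (lock_word and the function names are hypothetical; the real plock implementation is considerably more elaborate):

    static unsigned long lock_word;            /* bit 0 == locked */

    static inline void spin_lock(void)
    {
            /* pl_bts() returns non-zero if the bit was already set, i.e.
             * someone else holds the lock: spin until we manage to set it.
             */
            while (pl_bts(&lock_word, 0))
                    pl_cpu_relax();
    }

    static inline void spin_unlock(void)
    {
            pl_btr(&lock_word, 0);             /* atomically release the lock */
    }
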
+
+/* Note: for an unclear reason, gcc's __sync_fetch_and_add() implementation
+ * produces less optimal code than hand-crafted asm, so let's implement here the
+ * operations we need for the most common archs.
+ */
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ * => THIS IS LEGACY, USE _pl_ldadd() INSTEAD.
+ */
+#define _pl_xadd(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(x); \
+ asm volatile("lock xaddq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(x); \
+ asm volatile("lock xaddl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(x); \
+ asm volatile("lock xaddw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(x); \
+ asm volatile("lock xaddb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_xadd__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_xadd__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ */
+#define _pl_ldadd_lax(ptr, x) _pl_ldadd(ptr, x)
+#define _pl_ldadd_acq(ptr, x) _pl_ldadd(ptr, x)
+#define _pl_ldadd_rel(ptr, x) _pl_ldadd(ptr, x)
+#define _pl_ldadd(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(x); \
+ asm volatile("lock xaddq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(x); \
+ asm volatile("lock xaddl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(x); \
+ asm volatile("lock xaddw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(x); \
+ asm volatile("lock xaddb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ldadd__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_ldadd__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* fetch-and-sub: fetch integer value pointed to by pointer <ptr>, add -<x>
+ * to <*ptr> and return the previous value.
+ */
+#define _pl_ldsub_lax(ptr, x) _pl_ldsub(ptr, x)
+#define _pl_ldsub_acq(ptr, x) _pl_ldsub(ptr, x)
+#define _pl_ldsub_rel(ptr, x) _pl_ldsub(ptr, x)
+#define _pl_ldsub(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(-x); \
+ asm volatile("lock xaddq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(-x); \
+ asm volatile("lock xaddl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(-x); \
+ asm volatile("lock xaddw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(-x); \
+ asm volatile("lock xaddb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ldsub__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_ldsub__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* exchange value <x> with integer value pointed to by pointer <ptr>, and return
+ * previous <*ptr> value. <x> must be of the same size as <*ptr>.
+ */
+#define _pl_xchg(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(x); \
+ asm volatile("xchgq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(x); \
+ asm volatile("xchgl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(x); \
+ asm volatile("xchgw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(x); \
+ asm volatile("xchgb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_xchg__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_xchg__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* compare integer value <*ptr> with <old> and exchange it with <new> if
+ * it matches, and return the value found in <*ptr> (which equals <old> on
+ * success). <old> and <new> must be of the same size as <*ptr>.
+ */
+#define _pl_cmpxchg(ptr, old, new) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret; \
+ asm volatile("lock cmpxchgq %2,%1" \
+ : "=a" (ret), "+m" (*(ptr)) \
+ : "r" ((unsigned long)(new)), \
+ "0" ((unsigned long)(old)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret; \
+ asm volatile("lock cmpxchgl %2,%1" \
+ : "=a" (ret), "+m" (*(ptr)) \
+ : "r" ((unsigned int)(new)), \
+ "0" ((unsigned int)(old)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret; \
+ asm volatile("lock cmpxchgw %2,%1" \
+ : "=a" (ret), "+m" (*(ptr)) \
+ : "r" ((unsigned short)(new)), \
+ "0" ((unsigned short)(old)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock cmpxchgb %2,%1" \
+ : "=a" (ret), "+m" (*(ptr)) \
+ : "r" ((unsigned char)(new)), \
+ "0" ((unsigned char)(old)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_cmpxchg__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_cmpxchg__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/*
+ * ##### ARM64 (aarch64) below #####
+ */
+#elif defined(__aarch64__)
+
+/* This was shown to improve fairness on modern ARMv8 such as Neoverse N1 */
+#define pl_cpu_relax() do { \
+ asm volatile("isb" ::: "memory"); \
+ } while (0)
+
+/* full/load/store barriers */
+#define _pl_mb() do { asm volatile("dmb ish" ::: "memory"); } while (0)
+#define _pl_mb_load() do { asm volatile("dmb ishld" ::: "memory"); } while (0)
+#define _pl_mb_store() do { asm volatile("dmb ishst" ::: "memory"); } while (0)
+
+/* atomic full/load/store */
+#define _pl_mb_ato() do { asm volatile("dmb ish" ::: "memory"); } while (0)
+#define _pl_mb_ato_load() do { asm volatile("dmb ishld" ::: "memory"); } while (0)
+#define _pl_mb_ato_store() do { asm volatile("dmb ishst" ::: "memory"); } while (0)
+
+#endif // end of arch-specific code
+
+
+/*
+ * Generic code using the C11 __atomic API for functions not defined above.
+ * These are usable from gcc-4.7 and clang. We'll simply rely on the macros
+ * defining the memory orders to detect them. Not all operations are
+ * necessarily defined, so some fallbacks to the default methods might still
+ * be necessary.
+ */
+
+
+#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) && \
+ defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST)
+
+/* compiler-only memory barrier, for use around locks */
+#ifndef pl_barrier
+#define pl_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
+#endif
+
+/* full memory barrier */
+#ifndef pl_mb
+#define pl_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#endif
+
+/* atomic load */
+#ifndef pl_load_lax
+#define pl_load_lax(ptr) __atomic_load_n(ptr, __ATOMIC_RELAXED)
+#endif
+
+#ifndef pl_load
+#define pl_load(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
+#endif
+
+/* atomic store */
+#ifndef pl_store_lax
+#define pl_store_lax(ptr, x) __atomic_store_n((ptr), (x), __ATOMIC_RELAXED)
+#endif
+
+#ifndef pl_store
+#define pl_store(ptr, x) __atomic_store_n((ptr), (x), __ATOMIC_RELEASE)
+#endif
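
The acquire/release pairing of pl_load() and pl_store() is what makes a classic message-passing handoff work without any explicit barrier. A minimal sketch under that assumption (payload, ready and the function names are hypothetical):

    static int payload;
    static int ready;

    static void publish(int v)
    {
            payload = v;                /* plain write */
            pl_store(&ready, 1);        /* release: orders the write above before the flag */
    }

    static int consume(void)
    {
            while (!pl_load(&ready))    /* acquire: pairs with the release store */
                    pl_cpu_relax();
            return payload;             /* guaranteed to observe the published value */
    }
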
+
+/* increment integer value pointed to by pointer <ptr>, and return non-zero if
+ * result is non-null.
+ */
+#ifndef pl_inc_lax
+#define pl_inc_lax(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_RELAXED) != 0)
+#endif
+
+#ifndef pl_inc_acq
+#define pl_inc_acq(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_ACQUIRE) != 0)
+#endif
+
+#ifndef pl_inc_rel
+#define pl_inc_rel(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_RELEASE) != 0)
+#endif
+
+#ifndef pl_inc
+#define pl_inc(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_SEQ_CST) != 0)
+#endif
+
+/* decrement integer value pointed to by pointer <ptr>, and return non-zero if
+ * result is non-null.
+ */
+#ifndef pl_dec_lax
+#define pl_dec_lax(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_RELAXED) != 0)
+#endif
+
+#ifndef pl_dec_acq
+#define pl_dec_acq(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_ACQUIRE) != 0)
+#endif
+
+#ifndef pl_dec_rel
+#define pl_dec_rel(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_RELEASE) != 0)
+#endif
+
+#ifndef pl_dec
+#define pl_dec(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_SEQ_CST) != 0)
+#endif
+
+/* increment integer value pointed to by pointer <ptr>, no return */
+#ifndef pl_inc_noret_lax
+#define pl_inc_noret_lax(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_inc_noret_acq
+#define pl_inc_noret_acq(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_inc_noret_rel
+#define pl_inc_noret_rel(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_inc_noret
+#define pl_inc_noret(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_SEQ_CST))
+#endif
+
+/* decrement integer value pointed to by pointer <ptr>, no return */
+#ifndef pl_dec_noret_lax
+#define pl_dec_noret_lax(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_dec_noret_acq
+#define pl_dec_noret_acq(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_dec_noret_rel
+#define pl_dec_noret_rel(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_dec_noret
+#define pl_dec_noret(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_SEQ_CST))
+#endif
+
+/* add integer constant <x> to integer value pointed to by pointer <ptr>,
+ * no return. Size of <x> is not checked.
+ */
+#ifndef pl_add_lax
+#define pl_add_lax(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_add_acq
+#define pl_add_acq(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_add_rel
+#define pl_add_rel(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_add
+#define pl_add(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* subtract integer constant <x> from integer value pointed to by pointer
+ * <ptr>, no return. Size of <x> is not checked.
+ */
+#ifndef pl_sub_lax
+#define pl_sub_lax(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_sub_acq
+#define pl_sub_acq(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_sub_rel
+#define pl_sub_rel(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_sub
+#define pl_sub(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* binary and integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#ifndef pl_and_lax
+#define pl_and_lax(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_and_acq
+#define pl_and_acq(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_and_rel
+#define pl_and_rel(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_and
+#define pl_and(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* binary or integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#ifndef pl_or_lax
+#define pl_or_lax(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_or_acq
+#define pl_or_acq(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_or_rel
+#define pl_or_rel(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_or
+#define pl_or(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* binary xor integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#ifndef pl_xor_lax
+#define pl_xor_lax(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_xor_acq
+#define pl_xor_acq(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_xor_rel
+#define pl_xor_rel(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_xor
+#define pl_xor(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ * => THIS IS LEGACY, USE pl_ldadd() INSTEAD.
+ */
+#ifndef pl_xadd
+#define pl_xadd(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* exchange value <x> with integer value pointed to by pointer <ptr>, and return
+ * previous <*ptr> value. <x> must be of the same size as <*ptr>.
+ */
+#ifndef pl_xchg
+#define pl_xchg(ptr, x) (__atomic_exchange_n((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* compare integer value <*ptr> with <old> and exchange it with <new> if
+ * it matches, and return the value found in <*ptr> (which equals <old> on
+ * success). <old> and <new> must be of the same size as <*ptr>.
+ */
+#ifndef pl_cmpxchg
+#define pl_cmpxchg(ptr, old, new) ({ \
+ typeof(*ptr) __old = (old); \
+ __atomic_compare_exchange_n((ptr), &__old, (new), 0, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); \
+ __old; })
+#endif
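
Note that pl_cmpxchg() returns the value observed in memory rather than a boolean, so success is detected by comparing the result with <old>, and the returned value can directly seed the next attempt. A typical CAS loop, here an atomic "raise to maximum" (update_max is an illustrative name, not part of the patch):

    static void update_max(unsigned int *max, unsigned int v)
    {
            unsigned int cur = pl_load(max);

            while (cur < v) {
                    unsigned int prev = pl_cmpxchg(max, cur, v);

                    if (prev == cur)    /* memory matched <cur>: the swap happened */
                            break;
                    cur = prev;         /* lost a race: retry from the observed value */
            }
    }
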
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ */
+#ifndef pl_ldadd_lax
+#define pl_ldadd_lax(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldadd_acq
+#define pl_ldadd_acq(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldadd_rel
+#define pl_ldadd_rel(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldadd
+#define pl_ldadd(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+#ifndef pl_ldand_lax
+#define pl_ldand_lax(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldand_acq
+#define pl_ldand_acq(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldand_rel
+#define pl_ldand_rel(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldand
+#define pl_ldand(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+#ifndef pl_ldor_lax
+#define pl_ldor_lax(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldor_acq
+#define pl_ldor_acq(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldor_rel
+#define pl_ldor_rel(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldor
+#define pl_ldor(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+#ifndef pl_ldsub_lax
+#define pl_ldsub_lax(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldsub_acq
+#define pl_ldsub_acq(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldsub_rel
+#define pl_ldsub_rel(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldsub
+#define pl_ldsub(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+#ifndef pl_ldxor_lax
+#define pl_ldxor_lax(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldxor_acq
+#define pl_ldxor_acq(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldxor_rel
+#define pl_ldxor_rel(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldxor
+#define pl_ldxor(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
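
The pl_ld*() family returns the value that was in memory before the operation, which is often the interesting part. For example, a hypothetical stop flag can be both set and tested in a single atomic step (F_STOPPING, flags and request_stop are illustrative names only):

    #define F_STOPPING 0x01

    static unsigned int flags;

    /* requests a stop; returns non-zero if a stop had already been requested */
    static int request_stop(void)
    {
            return pl_ldor(&flags, F_STOPPING) & F_STOPPING;
    }
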
+
+#endif /* end of C11 atomics */
+
+
+/*
+ * Automatically remap to fallback code when available. This allows the arch
+ * specific code above to be used as an immediate fallback for missing C11
+ * definitions. Everything not defined will use the generic code at the end.
+ */
+
+#if !defined(pl_cpu_relax) && defined(_pl_cpu_relax)
+# define pl_cpu_relax _pl_cpu_relax
+#endif
+
+#if !defined(pl_barrier) && defined(_pl_barrier)
+# define pl_barrier _pl_barrier
+#endif
+
+#if !defined(pl_mb) && defined(_pl_mb)
+# define pl_mb _pl_mb
+#endif
+
+#if !defined(pl_mb_load) && defined(_pl_mb_load)
+# define pl_mb_load _pl_mb_load
+#endif
+
+#if !defined(pl_mb_store) && defined(_pl_mb_store)
+# define pl_mb_store _pl_mb_store
+#endif
+
+#if !defined(pl_mb_ato) && defined(_pl_mb_ato)
+# define pl_mb_ato _pl_mb_ato
+#endif
+
+#if !defined(pl_mb_ato_load) && defined(_pl_mb_ato_load)
+# define pl_mb_ato_load _pl_mb_ato_load
+#endif
+
+#if !defined(pl_mb_ato_store) && defined(_pl_mb_ato_store)
+# define pl_mb_ato_store _pl_mb_ato_store
+#endif
+
+
+#if !defined(pl_load) && defined(_pl_load)
+#define pl_load _pl_load
+#endif
+
+#if !defined(pl_load_lax) && defined(_pl_load_lax)
+#define pl_load_lax _pl_load_lax
+#endif
+
+#if !defined(pl_store) && defined(_pl_store)
+#define pl_store _pl_store
+#endif
+
+#if !defined(pl_store_lax) && defined(_pl_store_lax)
+#define pl_store_lax _pl_store_lax
+#endif
+
+
+#if !defined(pl_inc_noret_lax) && defined(_pl_inc_noret_lax)
+# define pl_inc_noret_lax _pl_inc_noret_lax
+#endif
+
+#if !defined(pl_inc_noret_acq) && defined(_pl_inc_noret_acq)
+# define pl_inc_noret_acq _pl_inc_noret_acq
+#endif
+
+#if !defined(pl_inc_noret_rel) && defined(_pl_inc_noret_rel)
+# define pl_inc_noret_rel _pl_inc_noret_rel
+#endif
+
+#if !defined(pl_inc_noret) && defined(_pl_inc_noret)
+# define pl_inc_noret _pl_inc_noret
+#endif
+
+
+#if !defined(pl_dec_noret_lax) && defined(_pl_dec_noret_lax)
+# define pl_dec_noret_lax _pl_dec_noret_lax
+#endif
+
+#if !defined(pl_dec_noret_acq) && defined(_pl_dec_noret_acq)
+# define pl_dec_noret_acq _pl_dec_noret_acq
+#endif
+
+#if !defined(pl_dec_noret_rel) && defined(_pl_dec_noret_rel)
+# define pl_dec_noret_rel _pl_dec_noret_rel
+#endif
+
+#if !defined(pl_dec_noret) && defined(_pl_dec_noret)
+# define pl_dec_noret _pl_dec_noret
+#endif
+
+
+#if !defined(pl_inc_lax) && defined(_pl_inc_lax)
+# define pl_inc_lax _pl_inc_lax
+#endif
+
+#if !defined(pl_inc_acq) && defined(_pl_inc_acq)
+# define pl_inc_acq _pl_inc_acq
+#endif
+
+#if !defined(pl_inc_rel) && defined(_pl_inc_rel)
+# define pl_inc_rel _pl_inc_rel
+#endif
+
+#if !defined(pl_inc) && defined(_pl_inc)
+# define pl_inc _pl_inc
+#endif
+
+
+#if !defined(pl_dec_lax) && defined(_pl_dec_lax)
+# define pl_dec_lax _pl_dec_lax
+#endif
+
+#if !defined(pl_dec_acq) && defined(_pl_dec_acq)
+# define pl_dec_acq _pl_dec_acq
+#endif
+
+#if !defined(pl_dec_rel) && defined(_pl_dec_rel)
+# define pl_dec_rel _pl_dec_rel
+#endif
+
+#if !defined(pl_dec) && defined(_pl_dec)
+# define pl_dec _pl_dec
+#endif
+
+
+#if !defined(pl_add_lax) && defined(_pl_add_lax)
+# define pl_add_lax _pl_add_lax
+#endif
+
+#if !defined(pl_add_acq) && defined(_pl_add_acq)
+# define pl_add_acq _pl_add_acq
+#endif
+
+#if !defined(pl_add_rel) && defined(_pl_add_rel)
+# define pl_add_rel _pl_add_rel
+#endif
+
+#if !defined(pl_add) && defined(_pl_add)
+# define pl_add _pl_add
+#endif
+
+
+#if !defined(pl_add_noret_lax) && defined(_pl_add_noret_lax)
+# define pl_add_noret_lax _pl_add_noret_lax
+#endif
+
+#if !defined(pl_add_noret_acq) && defined(_pl_add_noret_acq)
+# define pl_add_noret_acq _pl_add_noret_acq
+#endif
+
+#if !defined(pl_add_noret_rel) && defined(_pl_add_noret_rel)
+# define pl_add_noret_rel _pl_add_noret_rel
+#endif
+
+#if !defined(pl_add_noret) && defined(_pl_add_noret)
+# define pl_add_noret _pl_add_noret
+#endif
+
+#if !defined(pl_and_lax) && defined(_pl_and_lax)
+# define pl_and_lax _pl_and_lax
+#endif
+
+#if !defined(pl_and_acq) && defined(_pl_and_acq)
+# define pl_and_acq _pl_and_acq
+#endif
+
+#if !defined(pl_and_rel) && defined(_pl_and_rel)
+# define pl_and_rel _pl_and_rel
+#endif
+
+#if !defined(pl_and) && defined(_pl_and)
+# define pl_and _pl_and
+#endif
+
+
+#if !defined(pl_and_noret_lax) && defined(_pl_and_noret_lax)
+# define pl_and_noret_lax _pl_and_noret_lax
+#endif
+
+#if !defined(pl_and_noret_acq) && defined(_pl_and_noret_acq)
+# define pl_and_noret_acq _pl_and_noret_acq
+#endif
+
+#if !defined(pl_and_noret_rel) && defined(_pl_and_noret_rel)
+# define pl_and_noret_rel _pl_and_noret_rel
+#endif
+
+#if !defined(pl_and_noret) && defined(_pl_and_noret)
+# define pl_and_noret _pl_and_noret
+#endif
+
+
+#if !defined(pl_or_lax) && defined(_pl_or_lax)
+# define pl_or_lax _pl_or_lax
+#endif
+
+#if !defined(pl_or_acq) && defined(_pl_or_acq)
+# define pl_or_acq _pl_or_acq
+#endif
+
+#if !defined(pl_or_rel) && defined(_pl_or_rel)
+# define pl_or_rel _pl_or_rel
+#endif
+
+#if !defined(pl_or) && defined(_pl_or)
+# define pl_or _pl_or
+#endif
+
+
+#if !defined(pl_or_noret_lax) && defined(_pl_or_noret_lax)
+# define pl_or_noret_lax _pl_or_noret_lax
+#endif
+
+#if !defined(pl_or_noret_acq) && defined(_pl_or_noret_acq)
+# define pl_or_noret_acq _pl_or_noret_acq
+#endif
+
+#if !defined(pl_or_noret_rel) && defined(_pl_or_noret_rel)
+# define pl_or_noret_rel _pl_or_noret_rel
+#endif
+
+#if !defined(pl_or_noret) && defined(_pl_or_noret)
+# define pl_or_noret _pl_or_noret
+#endif
+
+
+#if !defined(pl_xor_lax) && defined(_pl_xor_lax)
+# define pl_xor_lax _pl_xor_lax
+#endif
+
+#if !defined(pl_xor_acq) && defined(_pl_xor_acq)
+# define pl_xor_acq _pl_xor_acq
+#endif
+
+#if !defined(pl_xor_rel) && defined(_pl_xor_rel)
+# define pl_xor_rel _pl_xor_rel
+#endif
+
+#if !defined(pl_xor) && defined(_pl_xor)
+# define pl_xor _pl_xor
+#endif
+
+
+#if !defined(pl_xor_noret_lax) && defined(_pl_xor_noret_lax)
+# define pl_xor_noret_lax _pl_xor_noret_lax
+#endif
+
+#if !defined(pl_xor_noret_acq) && defined(_pl_xor_noret_acq)
+# define pl_xor_noret_acq _pl_xor_noret_acq
+#endif
+
+#if !defined(pl_xor_noret_rel) && defined(_pl_xor_noret_rel)
+# define pl_xor_noret_rel _pl_xor_noret_rel
+#endif
+
+#if !defined(pl_xor_noret) && defined(_pl_xor_noret)
+# define pl_xor_noret _pl_xor_noret
+#endif
+
+
+#if !defined(pl_sub_lax) && defined(_pl_sub_lax)
+# define pl_sub_lax _pl_sub_lax
+#endif
+
+#if !defined(pl_sub_acq) && defined(_pl_sub_acq)
+# define pl_sub_acq _pl_sub_acq
+#endif
+
+#if !defined(pl_sub_rel) && defined(_pl_sub_rel)
+# define pl_sub_rel _pl_sub_rel
+#endif
+
+#if !defined(pl_sub) && defined(_pl_sub)
+# define pl_sub _pl_sub
+#endif
+
+
+#if !defined(pl_sub_noret_lax) && defined(_pl_sub_noret_lax)
+# define pl_sub_noret_lax _pl_sub_noret_lax
+#endif
+
+#if !defined(pl_sub_noret_acq) && defined(_pl_sub_noret_acq)
+# define pl_sub_noret_acq _pl_sub_noret_acq
+#endif
+
+#if !defined(pl_sub_noret_rel) && defined(_pl_sub_noret_rel)
+# define pl_sub_noret_rel _pl_sub_noret_rel
+#endif
+
+#if !defined(pl_sub_noret) && defined(_pl_sub_noret)
+# define pl_sub_noret _pl_sub_noret
+#endif
+
+
+#if !defined(pl_btr_lax) && defined(_pl_btr_lax)
+# define pl_btr_lax _pl_btr_lax
+#endif
+
+#if !defined(pl_btr_acq) && defined(_pl_btr_acq)
+# define pl_btr_acq _pl_btr_acq
+#endif
+
+#if !defined(pl_btr_rel) && defined(_pl_btr_rel)
+# define pl_btr_rel _pl_btr_rel
+#endif
+
+#if !defined(pl_btr) && defined(_pl_btr)
+# define pl_btr _pl_btr
+#endif
+
+
+#if !defined(pl_bts_lax) && defined(_pl_bts_lax)
+# define pl_bts_lax _pl_bts_lax
+#endif
+
+#if !defined(pl_bts_acq) && defined(_pl_bts_acq)
+# define pl_bts_acq _pl_bts_acq
+#endif
+
+#if !defined(pl_bts_rel) && defined(_pl_bts_rel)
+# define pl_bts_rel _pl_bts_rel
+#endif
+
+#if !defined(pl_bts) && defined(_pl_bts)
+# define pl_bts _pl_bts
+#endif
+
+
+#if !defined(pl_xadd) && defined(_pl_xadd)
+# define pl_xadd _pl_xadd
+#endif
+
+#if !defined(pl_cmpxchg) && defined(_pl_cmpxchg)
+# define pl_cmpxchg _pl_cmpxchg
+#endif
+
+#if !defined(pl_xchg) && defined(_pl_xchg)
+# define pl_xchg _pl_xchg
+#endif
+
+
+#if !defined(pl_ldadd_lax) && defined(_pl_ldadd_lax)
+# define pl_ldadd_lax _pl_ldadd_lax
+#endif
+
+#if !defined(pl_ldadd_acq) && defined(_pl_ldadd_acq)
+# define pl_ldadd_acq _pl_ldadd_acq
+#endif
+
+#if !defined(pl_ldadd_rel) && defined(_pl_ldadd_rel)
+# define pl_ldadd_rel _pl_ldadd_rel
+#endif
+
+#if !defined(pl_ldadd) && defined(_pl_ldadd)
+# define pl_ldadd _pl_ldadd
+#endif
+
+
+#if !defined(pl_ldand_lax) && defined(_pl_ldand_lax)
+# define pl_ldand_lax _pl_ldand_lax
+#endif
+
+#if !defined(pl_ldand_acq) && defined(_pl_ldand_acq)
+# define pl_ldand_acq _pl_ldand_acq
+#endif
+
+#if !defined(pl_ldand_rel) && defined(_pl_ldand_rel)
+# define pl_ldand_rel _pl_ldand_rel
+#endif
+
+#if !defined(pl_ldand) && defined(_pl_ldand)
+# define pl_ldand _pl_ldand
+#endif
+
+
+#if !defined(pl_ldor_lax) && defined(_pl_ldor_lax)
+# define pl_ldor_lax _pl_ldor_lax
+#endif
+
+#if !defined(pl_ldor_acq) && defined(_pl_ldor_acq)
+# define pl_ldor_acq _pl_ldor_acq
+#endif
+
+#if !defined(pl_ldor_rel) && defined(_pl_ldor_rel)
+# define pl_ldor_rel _pl_ldor_rel
+#endif
+
+#if !defined(pl_ldor) && defined(_pl_ldor)
+# define pl_ldor _pl_ldor
+#endif
+
+
+#if !defined(pl_ldxor_lax) && defined(_pl_ldxor_lax)
+# define pl_ldxor_lax _pl_ldxor_lax
+#endif
+
+#if !defined(pl_ldxor_acq) && defined(_pl_ldxor_acq)
+# define pl_ldxor_acq _pl_ldxor_acq
+#endif
+
+#if !defined(pl_ldxor_rel) && defined(_pl_ldxor_rel)
+# define pl_ldxor_rel _pl_ldxor_rel
+#endif
+
+#if !defined(pl_ldxor) && defined(_pl_ldxor)
+# define pl_ldxor _pl_ldxor
+#endif
+
+
+#if !defined(pl_ldsub_lax) && defined(_pl_ldsub_lax)
+# define pl_ldsub_lax _pl_ldsub_lax
+#endif
+
+#if !defined(pl_ldsub_acq) && defined(_pl_ldsub_acq)
+# define pl_ldsub_acq _pl_ldsub_acq
+#endif
+
+#if !defined(pl_ldsub_rel) && defined(_pl_ldsub_rel)
+# define pl_ldsub_rel _pl_ldsub_rel
+#endif
+
+#if !defined(pl_ldsub) && defined(_pl_ldsub)
+# define pl_ldsub _pl_ldsub
+#endif
+
+
+/*
+ * Generic code using the __sync API for everything not defined above.
+ */
+
+
+/* CPU relaxation while waiting */
+#ifndef pl_cpu_relax
+#define pl_cpu_relax() do { \
+ asm volatile(""); \
+ } while (0)
+#endif
+
+/* compiler-only memory barrier, for use around locks */
+#ifndef pl_barrier
+#define pl_barrier() do { \
+ asm volatile("" ::: "memory"); \
+ } while (0)
+#endif
+
+/* full memory barrier */
+#ifndef pl_mb
+#define pl_mb() do { \
+ __sync_synchronize(); \
+ } while (0)
+#endif
+
+#ifndef pl_mb_load
+#define pl_mb_load() pl_mb()
+#endif
+
+#ifndef pl_mb_store
+#define pl_mb_store() pl_mb()
+#endif
+
+#ifndef pl_mb_ato
+#define pl_mb_ato() pl_mb()
+#endif
+
+#ifndef pl_mb_ato_load
+#define pl_mb_ato_load() pl_mb_ato()
+#endif
+
+#ifndef pl_mb_ato_store
+#define pl_mb_ato_store() pl_mb_ato()
+#endif
+
+/* atomic load: volatile after a load barrier */
+#ifndef pl_load
+#define pl_load(ptr) ({ \
+ typeof(*(ptr)) __pl_ret = ({ \
+ pl_mb_load(); \
+ *(volatile typeof(ptr))ptr; \
+ }); \
+ __pl_ret; \
+ })
+#endif
+
+/* atomic store, old style using a CAS */
+#ifndef pl_store
+#define pl_store(ptr, x) do { \
+ typeof((ptr)) __pl_ptr = (ptr); \
+ typeof((x)) __pl_x = (x); \
+ typeof(*(ptr)) __pl_old; \
+ do { \
+ __pl_old = *__pl_ptr; \
+ } while (!__sync_bool_compare_and_swap(__pl_ptr, __pl_old, __pl_x)); \
+ } while (0)
+#endif
+
+#ifndef pl_inc_noret
+#define pl_inc_noret(ptr) do { __sync_add_and_fetch((ptr), 1); } while (0)
+#endif
+
+#ifndef pl_dec_noret
+#define pl_dec_noret(ptr) do { __sync_sub_and_fetch((ptr), 1); } while (0)
+#endif
+
+#ifndef pl_inc
+#define pl_inc(ptr) ({ __sync_add_and_fetch((ptr), 1); })
+#endif
+
+#ifndef pl_dec
+#define pl_dec(ptr) ({ __sync_sub_and_fetch((ptr), 1); })
+#endif
+
+#ifndef pl_add
+#define pl_add(ptr, x) ({ __sync_add_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_and
+#define pl_and(ptr, x) ({ __sync_and_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_or
+#define pl_or(ptr, x) ({ __sync_or_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_xor
+#define pl_xor(ptr, x) ({ __sync_xor_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_sub
+#define pl_sub(ptr, x) ({ __sync_sub_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_btr
+#define pl_btr(ptr, bit) ({ typeof(*(ptr)) __pl_t = ((typeof(*(ptr)))1) << (bit); \
+ __sync_fetch_and_and((ptr), ~__pl_t) & __pl_t; \
+ })
+#endif
+
+#ifndef pl_bts
+#define pl_bts(ptr, bit) ({ typeof(*(ptr)) __pl_t = ((typeof(*(ptr)))1) << (bit); \
+ __sync_fetch_and_or((ptr), __pl_t) & __pl_t; \
+ })
+#endif
+
+#ifndef pl_xadd
+#define pl_xadd(ptr, x) ({ __sync_fetch_and_add((ptr), (x)); })
+#endif
+
+#ifndef pl_cmpxchg
+#define pl_cmpxchg(ptr, o, n) ({ __sync_val_compare_and_swap((ptr), (o), (n)); })
+#endif
+
+#ifndef pl_xchg
+#define pl_xchg(ptr, x) ({ \
+ typeof((ptr)) __pl_ptr = (ptr); \
+ typeof((x)) __pl_x = (x); \
+ typeof(*(ptr)) __pl_old; \
+ do { \
+ __pl_old = *__pl_ptr; \
+ } while (!__sync_bool_compare_and_swap(__pl_ptr, __pl_old, __pl_x)); \
+ __pl_old; \
+ })
+#endif
+
+#ifndef pl_ldadd
+#define pl_ldadd(ptr, x) ({ __sync_fetch_and_add((ptr), (x)); })
+#endif
+
+#ifndef pl_ldand
+#define pl_ldand(ptr, x) ({ __sync_fetch_and_and((ptr), (x)); })
+#endif
+
+#ifndef pl_ldor
+#define pl_ldor(ptr, x) ({ __sync_fetch_and_or((ptr), (x)); })
+#endif
+
+#ifndef pl_ldxor
+#define pl_ldxor(ptr, x) ({ __sync_fetch_and_xor((ptr), (x)); })
+#endif
+
+#ifndef pl_ldsub
+#define pl_ldsub(ptr, x) ({ __sync_fetch_and_sub((ptr), (x)); })
+#endif
+
+/* certain _noret operations may be defined from the regular ones */
+#if !defined(pl_inc_noret) && defined(pl_inc)
+# define pl_inc_noret(ptr) (void)pl_inc(ptr)
+#endif
+
+#if !defined(pl_dec_noret) && defined(pl_dec)
+# define pl_dec_noret(ptr) (void)pl_dec(ptr)
+#endif
+
+#if !defined(pl_add_noret) && defined(pl_add)
+# define pl_add_noret(ptr, x) (void)pl_add(ptr, x)
+#endif
+
+#if !defined(pl_sub_noret) && defined(pl_sub)
+# define pl_sub_noret(ptr, x) (void)pl_sub(ptr, x)
+#endif
+
+#if !defined(pl_or_noret) && defined(pl_or)
+# define pl_or_noret(ptr, x) (void)pl_or(ptr, x)
+#endif
+
+#if !defined(pl_and_noret) && defined(pl_and)
+# define pl_and_noret(ptr, x) (void)pl_and(ptr, x)
+#endif
+
+#if !defined(pl_xor_noret) && defined(pl_xor)
+# define pl_xor_noret(ptr, x) (void)pl_xor(ptr, x)
+#endif
+
+/* certain memory orders may fall back to the generic seq_cst definition */
+
+#if !defined(pl_load_lax) && defined(pl_load)
+#define pl_load_lax pl_load
+#endif
+
+
+#if !defined(pl_store_lax) && defined(pl_store)
+#define pl_store_lax pl_store
+#endif
+
+
+#if !defined(pl_inc_lax) && defined(pl_inc)
+# define pl_inc_lax pl_inc
+#endif
+#if !defined(pl_inc_acq) && defined(pl_inc)
+# define pl_inc_acq pl_inc
+#endif
+#if !defined(pl_inc_rel) && defined(pl_inc)
+# define pl_inc_rel pl_inc
+#endif
+
+
+#if !defined(pl_dec_lax) && defined(pl_dec)
+# define pl_dec_lax pl_dec
+#endif
+#if !defined(pl_dec_acq) && defined(pl_dec)
+# define pl_dec_acq pl_dec
+#endif
+
+#if !defined(pl_dec_rel) && defined(pl_dec)
+# define pl_dec_rel pl_dec
+#endif
+
+
+#if !defined(pl_inc_noret_lax) && defined(pl_inc_noret)
+# define pl_inc_noret_lax pl_inc_noret
+#endif
+
+#if !defined(pl_inc_noret_acq) && defined(pl_inc_noret)
+# define pl_inc_noret_acq pl_inc_noret
+#endif
+
+#if !defined(pl_inc_noret_rel) && defined(pl_inc_noret)
+# define pl_inc_noret_rel pl_inc_noret
+#endif
+
+
+#if !defined(pl_dec_noret_lax) && defined(pl_dec_noret)
+# define pl_dec_noret_lax pl_dec_noret
+#endif
+
+#if !defined(pl_dec_noret_acq) && defined(pl_dec_noret)
+# define pl_dec_noret_acq pl_dec_noret
+#endif
+
+#if !defined(pl_dec_noret_rel) && defined(pl_dec_noret)
+# define pl_dec_noret_rel pl_dec_noret
+#endif
+
+
+#if !defined(pl_add_lax) && defined(pl_add)
+# define pl_add_lax pl_add
+#endif
+
+#if !defined(pl_add_acq) && defined(pl_add)
+# define pl_add_acq pl_add
+#endif
+
+#if !defined(pl_add_rel) && defined(pl_add)
+# define pl_add_rel pl_add
+#endif
+
+
+#if !defined(pl_sub_lax) && defined(pl_sub)
+# define pl_sub_lax pl_sub
+#endif
+
+#if !defined(pl_sub_acq) && defined(pl_sub)
+# define pl_sub_acq pl_sub
+#endif
+
+#if !defined(pl_sub_rel) && defined(pl_sub)
+# define pl_sub_rel pl_sub
+#endif
+
+
+#if !defined(pl_and_lax) && defined(pl_and)
+# define pl_and_lax pl_and
+#endif
+
+#if !defined(pl_and_acq) && defined(pl_and)
+# define pl_and_acq pl_and
+#endif
+
+#if !defined(pl_and_rel) && defined(pl_and)
+# define pl_and_rel pl_and
+#endif
+
+
+#if !defined(pl_or_lax) && defined(pl_or)
+# define pl_or_lax pl_or
+#endif
+
+#if !defined(pl_or_acq) && defined(pl_or)
+# define pl_or_acq pl_or
+#endif
+
+#if !defined(pl_or_rel) && defined(pl_or)
+# define pl_or_rel pl_or
+#endif
+
+
+#if !defined(pl_xor_lax) && defined(pl_xor)
+# define pl_xor_lax pl_xor
+#endif
+
+#if !defined(pl_xor_acq) && defined(pl_xor)
+# define pl_xor_acq pl_xor
+#endif
+
+#if !defined(pl_xor_rel) && defined(pl_xor)
+# define pl_xor_rel pl_xor
+#endif
+
+
+#if !defined(pl_add_noret_lax) && defined(pl_add_noret)
+# define pl_add_noret_lax pl_add_noret
+#endif
+
+#if !defined(pl_add_noret_acq) && defined(pl_add_noret)
+# define pl_add_noret_acq pl_add_noret
+#endif
+
+#if !defined(pl_add_noret_rel) && defined(pl_add_noret)
+# define pl_add_noret_rel pl_add_noret
+#endif
+
+
+#if !defined(pl_sub_noret_lax) && defined(pl_sub_noret)
+# define pl_sub_noret_lax pl_sub_noret
+#endif
+
+#if !defined(pl_sub_noret_acq) && defined(pl_sub_noret)
+# define pl_sub_noret_acq pl_sub_noret
+#endif
+
+#if !defined(pl_sub_noret_rel) && defined(pl_sub_noret)
+# define pl_sub_noret_rel pl_sub_noret
+#endif
+
+
+#if !defined(pl_and_noret_lax) && defined(pl_and_noret)
+# define pl_and_noret_lax pl_and_noret
+#endif
+
+#if !defined(pl_and_noret_acq) && defined(pl_and_noret)
+# define pl_and_noret_acq pl_and_noret
+#endif
+
+#if !defined(pl_and_noret_rel) && defined(pl_and_noret)
+# define pl_and_noret_rel pl_and_noret
+#endif
+
+
+#if !defined(pl_or_noret_lax) && defined(pl_or_noret)
+# define pl_or_noret_lax pl_or_noret
+#endif
+
+#if !defined(pl_or_noret_acq) && defined(pl_or_noret)
+# define pl_or_noret_acq pl_or_noret
+#endif
+
+#if !defined(pl_or_noret_rel) && defined(pl_or_noret)
+# define pl_or_noret_rel pl_or_noret
+#endif
+
+
+#if !defined(pl_xor_noret_lax) && defined(pl_xor_noret)
+# define pl_xor_noret_lax pl_xor_noret
+#endif
+
+#if !defined(pl_xor_noret_acq) && defined(pl_xor_noret)
+# define pl_xor_noret_acq pl_xor_noret
+#endif
+
+#if !defined(pl_xor_noret_rel) && defined(pl_xor_noret)
+# define pl_xor_noret_rel pl_xor_noret
+#endif
+
+
+#if !defined(pl_btr_lax) && defined(pl_btr)
+# define pl_btr_lax pl_btr
+#endif
+
+#if !defined(pl_btr_acq) && defined(pl_btr)
+# define pl_btr_acq pl_btr
+#endif
+
+#if !defined(pl_btr_rel) && defined(pl_btr)
+# define pl_btr_rel pl_btr
+#endif
+
+
+#if !defined(pl_bts_lax) && defined(pl_bts)
+# define pl_bts_lax pl_bts
+#endif
+
+#if !defined(pl_bts_acq) && defined(pl_bts)
+# define pl_bts_acq pl_bts
+#endif
+
+#if !defined(pl_bts_rel) && defined(pl_bts)
+# define pl_bts_rel pl_bts
+#endif
+
+
+#if !defined(pl_ldadd_lax) && defined(pl_ldadd)
+# define pl_ldadd_lax pl_ldadd
+#endif
+
+#if !defined(pl_ldadd_acq) && defined(pl_ldadd)
+# define pl_ldadd_acq pl_ldadd
+#endif
+
+#if !defined(pl_ldadd_rel) && defined(pl_ldadd)
+# define pl_ldadd_rel pl_ldadd
+#endif
+
+
+#if !defined(pl_ldsub_lax) && defined(pl_ldsub)
+# define pl_ldsub_lax pl_ldsub
+#endif
+
+#if !defined(pl_ldsub_acq) && defined(pl_ldsub)
+# define pl_ldsub_acq pl_ldsub
+#endif
+
+#if !defined(pl_ldsub_rel) && defined(pl_ldsub)
+# define pl_ldsub_rel pl_ldsub
+#endif
+
+
+#if !defined(pl_ldand_lax) && defined(pl_ldand)
+# define pl_ldand_lax pl_ldand
+#endif
+
+#if !defined(pl_ldand_acq) && defined(pl_ldand)
+# define pl_ldand_acq pl_ldand
+#endif
+
+#if !defined(pl_ldand_rel) && defined(pl_ldand)
+# define pl_ldand_rel pl_ldand
+#endif
+
+
+#if !defined(pl_ldor_lax) && defined(pl_ldor)
+# define pl_ldor_lax pl_ldor
+#endif
+
+#if !defined(pl_ldor_acq) && defined(pl_ldor)
+# define pl_ldor_acq pl_ldor
+#endif
+
+#if !defined(pl_ldor_rel) && defined(pl_ldor)
+# define pl_ldor_rel pl_ldor
+#endif
+
+
+#if !defined(pl_ldxor_lax) && defined(pl_ldxor)
+# define pl_ldxor_lax pl_ldxor
+#endif
+
+#if !defined(pl_ldxor_acq) && defined(pl_ldxor)
+# define pl_ldxor_acq pl_ldxor
+#endif
+
+#if !defined(pl_ldxor_rel) && defined(pl_ldxor)
+# define pl_ldxor_rel pl_ldxor
+#endif
+
+
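+/* Example (illustrative sketch, not upstream documentation): the fallback
+ * definitions above alias the relaxed/acquire/release variants to the plain
+ * operation whenever the architecture-specific code did not provide a
+ * dedicated one, so on such a platform the following calls expand to the
+ * same thing (<counter> is a hypothetical variable):
+ *
+ *     pl_ldadd_acq(&counter, 1);       // becomes pl_ldadd(&counter, 1)
+ *     pl_sub_noret_rel(&counter, 1);   // becomes pl_sub_noret(&counter, 1)
+ *
+ * The plain form is typically at least as strongly ordered as the variant it
+ * stands in for, so the fallback stays correct, just possibly stronger than
+ * strictly required.
+ */
+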
+#endif /* PL_ATOMIC_OPS_H */
diff --git a/include/import/eb32sctree.h b/include/import/eb32sctree.h
new file mode 100644
index 0000000..5ace662
--- /dev/null
+++ b/include/import/eb32sctree.h
@@ -0,0 +1,121 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 32bit nodes.
+ * Version 6.0.6 with backports from v7-dev
+ * (C) 2002-2017 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB32SCTREE_H
+#define _EB32SCTREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb32sc_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb32sctree.c, which simply relies on their inline version.
+ */
+struct eb32sc_node *eb32sc_lookup_ge(struct eb_root *root, u32 x, unsigned long scope);
+struct eb32sc_node *eb32sc_lookup_ge_or_first(struct eb_root *root, u32 x, unsigned long scope);
+struct eb32sc_node *eb32sc_insert(struct eb_root *root, struct eb32sc_node *new, unsigned long scope);
+void eb32sc_delete(struct eb32sc_node *node);
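+
+/* Usage sketch (illustrative only, not upstream documentation): iterate over
+ * every leaf visible to a given scope mask, assuming the nodes were inserted
+ * with eb32sc_insert() under a matching scope. <struct my_item>, its <scnode>
+ * member, process() and <root> are hypothetical names:
+ *
+ *     struct eb32sc_node *node;
+ *
+ *     for (node = eb32sc_first(&root, 1UL << 2); node;
+ *          node = eb32sc_next(node, 1UL << 2))
+ *             process(eb32sc_entry(node, struct my_item, scnode));
+ */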
+
+/* Walks down left starting at root pointer <start>, and follows the leftmost
+ * branch whose scope matches <scope>. It returns the node hosting the first
+ * leaf found on that side, or NULL if no such leaf exists. <start> may either
+ * be NULL or a branch pointer.
+ */
+static inline struct eb32sc_node *eb32sc_walk_down_left(eb_troot_t *start, unsigned long scope)
+{
+ struct eb_root *root;
+ struct eb_node *node;
+ struct eb32sc_node *eb32;
+
+ if (unlikely(!start))
+ return NULL;
+
+ while (1) {
+ if (eb_gettag(start) == EB_NODE) {
+ root = eb_untag(start, EB_NODE);
+ node = eb_root_to_node(root);
+ eb32 = container_of(node, struct eb32sc_node, node);
+ if (eb32->node_s & scope) {
+ start = node->branches.b[EB_LEFT];
+ continue;
+ }
+ start = node->node_p;
+ }
+ else {
+ root = eb_untag(start, EB_LEAF);
+ node = eb_root_to_node(root);
+ eb32 = container_of(node, struct eb32sc_node, node);
+ if (eb32->leaf_s & scope)
+ return eb32;
+ start = node->leaf_p;
+ }
+
+ /* here we're on a node that doesn't match the scope. We have
+ * to walk to the closest right location.
+ */
+ while (eb_gettag(start) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ start = (eb_root_to_node(eb_untag(start, EB_RGHT)))->node_p;
+
+ /* Note that <start> cannot be NULL at this stage */
+ root = eb_untag(start, EB_LEFT);
+ start = root->b[EB_RGHT];
+ if (eb_clrtag(start) == NULL)
+ return NULL;
+ }
+}
+
+/* Return next node in the tree, starting with tagged parent <start>, or NULL if none */
+static inline struct eb32sc_node *eb32sc_next_with_parent(eb_troot_t *start, unsigned long scope)
+{
+ while (eb_gettag(start) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ start = (eb_root_to_node(eb_untag(start, EB_RGHT)))->node_p;
+
+	/* Note that <start> cannot be NULL at this stage */
+ start = (eb_untag(start, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(start) == NULL)
+ return NULL;
+
+ return eb32sc_walk_down_left(start, scope);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb32sc_node *eb32sc_next(struct eb32sc_node *eb32, unsigned long scope)
+{
+ return eb32sc_next_with_parent(eb32->node.leaf_p, scope);
+}
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb32sc_node *eb32sc_first(struct eb_root *root, unsigned long scope)
+{
+ return eb32sc_walk_down_left(root->b[0], scope);
+}
+
+#endif /* _EB32SCTREE_H */
diff --git a/include/import/eb32tree.h b/include/import/eb32tree.h
new file mode 100644
index 0000000..1c03fc1
--- /dev/null
+++ b/include/import/eb32tree.h
@@ -0,0 +1,482 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 32bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB32TREE_H
+#define _EB32TREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb32_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_first(struct eb_root *root)
+{
+ return eb32_entry(eb_first(root), struct eb32_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_last(struct eb_root *root)
+{
+ return eb32_entry(eb_last(root), struct eb32_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_next(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_next(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_prev(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_prev(&eb32->node), struct eb32_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb32_node *eb32_next_dup(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_next_dup(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb32_node *eb32_prev_dup(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_prev_dup(&eb32->node), struct eb32_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb32_node *eb32_next_unique(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_next_unique(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb32_node *eb32_prev_unique(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_prev_unique(&eb32->node), struct eb32_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static inline void eb32_delete(struct eb32_node *eb32)
+{
+ eb_delete(&eb32->node);
+}
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb32tree.c, which simply relies on their inline version.
+ */
+struct eb32_node *eb32_lookup(struct eb_root *root, u32 x);
+struct eb32_node *eb32i_lookup(struct eb_root *root, s32 x);
+struct eb32_node *eb32_lookup_le(struct eb_root *root, u32 x);
+struct eb32_node *eb32_lookup_ge(struct eb_root *root, u32 x);
+struct eb32_node *eb32_insert(struct eb_root *root, struct eb32_node *new);
+struct eb32_node *eb32i_insert(struct eb_root *root, struct eb32_node *new);
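+
+/* Usage sketch (illustrative only, not upstream documentation): a minimal
+ * insert-then-lookup sequence, assuming a hypothetical <struct my_item> that
+ * embeds an eb32_node named <node>, and a hypothetical use() function:
+ *
+ *     struct eb_root root = EB_ROOT;   // or EB_ROOT_UNIQUE to refuse dups
+ *     struct my_item item;
+ *
+ *     item.node.key = 42;
+ *     eb32_insert(&root, &item.node);
+ *
+ *     struct eb32_node *found = eb32_lookup(&root, 42);
+ *     if (found)
+ *             use(eb32_entry(found, struct my_item, node));
+ */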
+
+/*
+ * The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __eb32_delete(struct eb32_node *eb32)
+{
+ __eb_delete(&eb32->node);
+}
+
+/*
+ * Find the first occurrence of a key in the tree <root>. If none can be
+ * found, return NULL.
+ */
+static forceinline struct eb32_node *__eb32_lookup(struct eb_root *root, u32 x)
+{
+ struct eb32_node *node;
+ eb_troot_t *troot;
+ u32 y;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ if (node->key == x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ node_bit = node->node.bit;
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node_bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node_bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(x >> node_bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/*
+ * Find the first occurrence of a signed key in the tree <root>. If none can
+ * be found, return NULL.
+ */
+static forceinline struct eb32_node *__eb32i_lookup(struct eb_root *root, s32 x)
+{
+ struct eb32_node *node;
+ eb_troot_t *troot;
+ u32 key = x ^ 0x80000000;
+ u32 y;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ if (node->key == (u32)x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ node_bit = node->node.bit;
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node_bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node_bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(key >> node_bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/* Insert eb32_node <new> into subtree starting at node root <root>.
+ * Only new->key needs to be set with the key. The eb32_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb32_node *
+__eb32_insert(struct eb_root *root, struct eb32_node *new) {
+ struct eb32_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ u32 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries the key being inserted.
+ */
+ newkey = new->key;
+
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ /* insert above a leaf */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+
+ if (new->key == old->key) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (likely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb32_node, node);
+ }
+ /* otherwise fall through */
+ }
+
+ if (new->key >= old->key) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+/* Insert eb32_node <new> into subtree starting at node root <root>, using
+ * signed keys. Only new->key needs to be set with the key. The eb32_node
+ * is returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb32_node *
+__eb32i_insert(struct eb_root *root, struct eb32_node *new) {
+ struct eb32_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ int newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries a high bit shift of the key being
+ * inserted in order to have negative keys stored before positive
+ * ones.
+ */
+ newkey = new->key + 0x80000000;
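+	/* e.g. key -1 (0xffffffff) maps to 0x7fffffff while key 0 maps to
+	 * 0x80000000, so negative keys end up ordered before positive ones.
+	 */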
+
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+
+ if (new->key == old->key) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (likely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb32_node, node);
+ }
+ /* otherwise fall through */
+ }
+
+ if ((s32)new->key >= (s32)old->key) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EB32TREE_H */
diff --git a/include/import/eb64tree.h b/include/import/eb64tree.h
new file mode 100644
index 0000000..d6e5db4
--- /dev/null
+++ b/include/import/eb64tree.h
@@ -0,0 +1,575 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 64bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB64TREE_H
+#define _EB64TREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb64_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_first(struct eb_root *root)
+{
+ return eb64_entry(eb_first(root), struct eb64_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_last(struct eb_root *root)
+{
+ return eb64_entry(eb_last(root), struct eb64_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_next(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_next(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_prev(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_prev(&eb64->node), struct eb64_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb64_node *eb64_next_dup(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_next_dup(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb64_node *eb64_prev_dup(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_prev_dup(&eb64->node), struct eb64_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb64_node *eb64_next_unique(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_next_unique(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb64_node *eb64_prev_unique(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_prev_unique(&eb64->node), struct eb64_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static inline void eb64_delete(struct eb64_node *eb64)
+{
+ eb_delete(&eb64->node);
+}
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb64tree.c, which simply relies on their inline version.
+ */
+struct eb64_node *eb64_lookup(struct eb_root *root, u64 x);
+struct eb64_node *eb64i_lookup(struct eb_root *root, s64 x);
+struct eb64_node *eb64_lookup_le(struct eb_root *root, u64 x);
+struct eb64_node *eb64_lookup_ge(struct eb_root *root, u64 x);
+struct eb64_node *eb64_insert(struct eb_root *root, struct eb64_node *new);
+struct eb64_node *eb64i_insert(struct eb_root *root, struct eb64_node *new);
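+
+/* Usage sketch (illustrative only, not upstream documentation): 64-bit keys
+ * suit monotonic values such as timestamps, and eb64_lookup_ge() returns the
+ * first entry whose key is not lower than the argument. The <struct my_event>
+ * container and its <node> member are hypothetical:
+ *
+ *     struct eb_root root = EB_ROOT;
+ *     struct my_event ev;
+ *
+ *     ev.node.key = 1700000000ULL;
+ *     eb64_insert(&root, &ev.node);
+ *
+ *     struct eb64_node *first = eb64_lookup_ge(&root, 1600000000ULL);
+ *     // first == &ev.node here, since 1700000000 >= 1600000000
+ */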
+
+/*
+ * The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __eb64_delete(struct eb64_node *eb64)
+{
+ __eb_delete(&eb64->node);
+}
+
+/*
+ * Find the first occurrence of a key in the tree <root>. If none can be
+ * found, return NULL.
+ */
+static forceinline struct eb64_node *__eb64_lookup(struct eb_root *root, u64 x)
+{
+ struct eb64_node *node;
+ eb_troot_t *troot;
+ u64 y;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ if (node->key == x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node->node.bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/*
+ * Find the first occurrence of a signed key in the tree <root>. If none can
+ * be found, return NULL.
+ */
+static forceinline struct eb64_node *__eb64i_lookup(struct eb_root *root, s64 x)
+{
+ struct eb64_node *node;
+ eb_troot_t *troot;
+ u64 key = x ^ (1ULL << 63);
+ u64 y;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ if (node->key == (u64)x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node->node.bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(key >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/* Insert eb64_node <new> into subtree starting at node root <root>.
+ * Only new->key needs to be set with the key. The eb64_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb64_node *
+__eb64_insert(struct eb_root *root, struct eb64_node *new) {
+ struct eb64_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ u64 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries the key being inserted.
+ */
+ newkey = new->key;
+
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ - the tree does not contain the key, and we have
+ new->key < old->key. We insert new above old, on
+ the left ;
+
+ - the tree does not contain the key, and we have
+ new->key > old->key. We insert new above old, on
+ the right ;
+
+ - the tree does contain the key, which implies it
+ is alone. We add the new key next to it as a
+ first duplicate.
+
+ The last two cases can easily be partially merged.
+ */
+
+ if (new->key < old->key) {
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if ((new->key == old->key) && eb_gettag(root_right))
+ return old;
+
+				/* new->key >= old->key, new goes to the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+
+ if (new->key == old->key) {
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ if (new->key < old->key) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else if (new->key > old->key) {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ else {
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb64_node, node);
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+
+ if (sizeof(long) >= 8) {
+ side = newkey >> old_node_bit;
+ } else {
+ /* note: provides the best code on low-register count archs
+ * such as i386.
+ */
+ side = newkey;
+ side >>= old_node_bit;
+ if (old_node_bit >= 32) {
+ side = newkey >> 32;
+ side >>= old_node_bit & 0x1F;
+ }
+ }
+ side &= EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = fls64(new->key ^ old->key) - EB_NODE_BITS;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+
+ return new;
+}
+
+/* Insert eb64_node <new> into subtree starting at node root <root>, using
+ * signed keys. Only new->key needs to be set with the key. The eb64_node
+ * is returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb64_node *
+__eb64i_insert(struct eb_root *root, struct eb64_node *new) {
+ struct eb64_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ u64 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries a high bit shift of the key being
+ * inserted in order to have negative keys stored before positive
+ * ones.
+ */
+ newkey = new->key ^ (1ULL << 63);
+
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ - the tree does not contain the key, and we have
+ new->key < old->key. We insert new above old, on
+ the left ;
+
+ - the tree does not contain the key, and we have
+ new->key > old->key. We insert new above old, on
+ the right ;
+
+ - the tree does contain the key, which implies it
+ is alone. We add the new key next to it as a
+ first duplicate.
+
+ The last two cases can easily be partially merged.
+ */
+
+ if ((s64)new->key < (s64)old->key) {
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if ((new->key == old->key) && eb_gettag(root_right))
+ return old;
+
+				/* new->key >= old->key, new goes to the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+
+ if (new->key == old->key) {
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ if ((s64)new->key < (s64)old->key) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else if ((s64)new->key > (s64)old->key) {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ else {
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb64_node, node);
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+
+ if (sizeof(long) >= 8) {
+ side = newkey >> old_node_bit;
+ } else {
+ /* note: provides the best code on low-register count archs
+ * such as i386.
+ */
+ side = newkey;
+ side >>= old_node_bit;
+ if (old_node_bit >= 32) {
+ side = newkey >> 32;
+ side >>= old_node_bit & 0x1F;
+ }
+ }
+ side &= EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = fls64(new->key ^ old->key) - EB_NODE_BITS;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+
+ return new;
+}
+
+#endif /* _EB64TREE_H */
diff --git a/include/import/ebimtree.h b/include/import/ebimtree.h
new file mode 100644
index 0000000..0afbdd1
--- /dev/null
+++ b/include/import/ebimtree.h
@@ -0,0 +1,324 @@
+/*
+ * Elastic Binary Trees - macros for Indirect Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBIMTREE_H
+#define _EBIMTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+#include "ebpttree.h"
+
+/* These functions and macros rely on Pointer nodes and use the <key> entry as
+ * a pointer to an indirect key. Most operations are performed using ebpt_*.
+ */
+
+/* The following functions are not inlined by default. They are declared
+ * in ebimtree.c, which simply relies on their inline version.
+ */
+struct ebpt_node *ebim_lookup(struct eb_root *root, const void *x, unsigned int len);
+struct ebpt_node *ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len);
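+
+/* Usage sketch (illustrative only, not upstream documentation): the ebpt_node
+ * stores a pointer to the caller's data (nothing is copied), so the pointed-to
+ * bytes must remain valid while the node is in the tree. <struct my_entry> is
+ * hypothetical:
+ *
+ *     struct my_entry { struct ebpt_node node; char id[16]; };
+ *     struct eb_root root = EB_ROOT;
+ *     struct my_entry e = { .id = "abcd1234" };
+ *
+ *     e.node.key = e.id;               // indirect key: a pointer, not a copy
+ *     ebim_insert(&root, &e.node, 8);  // compare only the first 8 bytes
+ *
+ *     struct ebpt_node *found = ebim_lookup(&root, "abcd1234", 8);
+ */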
+
+/* Find the first occurrence of a key of at least <len> bytes matching <x> in the
+ * tree <root>. The caller is responsible for ensuring that <len> will not exceed
+ * the common parts between the tree's keys and <x>. In case of multiple matches,
+ * the leftmost node is returned. This means that this function can be used to
+ * lookup string keys by prefix if all keys in the tree are zero-terminated. If
+ * no match is found, NULL is returned. Returns first node if <len> is zero.
+ */
+static forceinline struct ebpt_node *
+__ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+ struct ebpt_node *node;
+ eb_troot_t *troot;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ goto ret_null;
+
+ if (unlikely(len == 0))
+ goto walk_down;
+
+ pos = 0;
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto ret_node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto walk_left;
+ }
+
+ /* OK, normal data node, let's walk down. We check if all full
+ * bytes are equal, and we start from the last one we did not
+ * completely check. We stop as soon as we reach the last byte,
+ * because we must decide to go left/right or abort.
+ */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This surprising construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ if (*(unsigned char*)(node->key + pos++) ^ *(unsigned char*)(x++))
+ goto ret_null; /* more than one full byte is different */
+ if (--len == 0)
+ goto walk_left; /* return first node if all bytes matched */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so node_bit < 8.
+ * We have 2 possibilities :
+ * - more than the last bit differs => return NULL
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((*(unsigned char*)(node->key + pos) >> node_bit) ^ side) > 1)
+ goto ret_null;
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+ walk_left:
+ troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ ret_node:
+ return node;
+ ret_null:
+ return NULL;
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>.
+ * Only new->key needs to be set with the key. The ebpt_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+static forceinline struct ebpt_node *
+__ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len)
+{
+ struct ebpt_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ len <<= 3;
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ * - the tree does not contain the key, and we have
+ * new->key < old->key. We insert new above old, on
+ * the left ;
+ *
+ * - the tree does not contain the key, and we have
+ * new->key > old->key. We insert new above old, on
+ * the right ;
+ *
+ * - the tree does contain the key, which implies it
+ * is alone. We add the new key next to it as a
+ * first duplicate.
+ *
+ * The last two cases can easily be partially merged.
+ */
+ bit = equal_bits(new->key, old->key, bit, len);
+
+ /* Note: we can compare more bits than the current node's because as
+ * long as they are identical, we know we descend along the correct
+ * side. However we don't want to start to compare past the end.
+ */
+ diff = 0;
+ if (((unsigned)bit >> 3) < len)
+ diff = cmp_bits(new->key, old->key, bit);
+
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if (diff == 0 && eb_gettag(root_right))
+ return old;
+
+				/* new->key >= old->key, new goes to the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+
+ if (diff == 0) {
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+ if (old_node_bit < 0) {
+ /* we're above a duplicate tree, we must compare till the end */
+ bit = equal_bits(new->key, old->key, bit, len);
+ goto dup_tree;
+ }
+ else if (bit < old_node_bit) {
+ bit = equal_bits(new->key, old->key, bit, old_node_bit);
+ }
+
+ if (bit < old_node_bit) { /* we don't have all bits in common */
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ dup_tree:
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ /* Note: we can compare more bits than the current node's because as
+ * long as they are identical, we know we descend along the correct
+ * side. However we don't want to start to compare past the end.
+ */
+ diff = 0;
+ if (((unsigned)bit >> 3) < len)
+ diff = cmp_bits(new->key, old->key, bit);
+
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else if (diff > 0) {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ else {
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebpt_node, node);
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (((unsigned char *)new->key)[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * This number of bits is already in <bit>.
+ */
+ new->node.bit = bit;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EBIMTREE_H */
diff --git a/include/import/ebistree.h b/include/import/ebistree.h
new file mode 100644
index 0000000..a438fa1
--- /dev/null
+++ b/include/import/ebistree.h
@@ -0,0 +1,329 @@
+/*
+ * Elastic Binary Trees - macros to manipulate Indirect String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* These functions and macros rely on Multi-Byte nodes */
+
+#ifndef _EBISTREE_H
+#define _EBISTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+#include "ebpttree.h"
+#include "ebimtree.h"
+
+/* These functions and macros rely on Pointer nodes and use the <key> entry as
+ * a pointer to an indirect key. Most operations are performed using ebpt_*.
+ */
+
+/* The following functions are not inlined by default. They are declared
+ * in ebistree.c, which simply relies on their inline version.
+ */
+struct ebpt_node *ebis_lookup(struct eb_root *root, const char *x);
+struct ebpt_node *ebis_insert(struct eb_root *root, struct ebpt_node *new);
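+
+/* Usage sketch (illustrative only, not upstream documentation): keys are
+ * zero-terminated strings referenced by pointer (never copied), so the string
+ * must remain valid as long as the node stays in the tree. The hypothetical
+ * <struct my_server> embeds the node and owns the storage for the name:
+ *
+ *     struct my_server { struct ebpt_node name_node; char name[64]; };
+ *     struct eb_root root = EB_ROOT_UNIQUE;   // refuse duplicate names
+ *     struct my_server srv;
+ *
+ *     snprintf(srv.name, sizeof(srv.name), "web1");
+ *     srv.name_node.key = srv.name;
+ *     ebis_insert(&root, &srv.name_node);
+ *
+ *     struct ebpt_node *found = ebis_lookup(&root, "web1");
+ */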
+
+/* Find the first occurrence of a length <len> string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings, and to ensure that no null character
+ * is present in string <x> in the first <len> chars. If none can be found,
+ * return NULL.
+ */
+static forceinline struct ebpt_node *
+ebis_lookup_len(struct eb_root *root, const char *x, unsigned int len)
+{
+ struct ebpt_node *node;
+
+ node = ebim_lookup(root, x, len);
+ if (!node || ((const char *)node->key)[len] != 0)
+ return NULL;
+ return node;
+}
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+static forceinline struct ebpt_node *__ebis_lookup(struct eb_root *root, const void *x)
+{
+ struct ebpt_node *node;
+ eb_troot_t *troot;
+ int bit;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ bit = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ if (strcmp(node->key, x) == 0)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+ node_bit = node->node.bit;
+
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (strcmp(node->key, x) != 0)
+ return NULL;
+
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ return node;
+ }
+
+ /* OK, normal data node, let's walk down but don't compare data
+ * if we already reached the end of the key.
+ */
+ if (likely(bit >= 0)) {
+ bit = string_equal_bits(x, node->key, bit);
+ if (likely(bit < node_bit)) {
+ if (bit >= 0)
+ return NULL; /* no more common bits */
+
+ /* bit < 0 : we reached the end of the key. If we
+ * are in a tree with unique keys, we can return
+ * this node. Otherwise we have to walk it down
+ * and stop comparing bits.
+ */
+ if (eb_gettag(root->b[EB_RGHT]))
+ return node;
+ }
+ /* if the bit is larger than the node's, we must bound it
+ * because we might have compared too many bytes with an
+ * inappropriate leaf. For a test, build a tree from "0",
+ * "WW", "W", "S" inserted in this exact sequence and lookup
+ * "W" => "S" is returned without this assignment.
+ */
+ else
+ bit = node_bit;
+ }
+
+ troot = node->node.branches.b[(((unsigned char*)x)[node_bit >> 3] >>
+ (~node_bit & 7)) & 1];
+ }
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>. Only
+ * new->key needs to be set with the zero-terminated string key. The ebpt_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+static forceinline struct ebpt_node *
+__ebis_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ struct ebpt_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ * - the tree does not contain the key, and we have
+ * new->key < old->key. We insert new above old, on
+ * the left ;
+ *
+ * - the tree does not contain the key, and we have
+ * new->key > old->key. We insert new above old, on
+ * the right ;
+ *
+ * - the tree does contain the key, which implies it
+ * is alone. We add the new key next to it as a
+ * first duplicate.
+ *
+ * The last two cases can easily be partially merged.
+ */
+ if (bit >= 0)
+ bit = string_equal_bits(new->key, old->key, bit);
+
+ if (bit < 0) {
+ /* key was already there */
+
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if (eb_gettag(root_right))
+ return old;
+
+ /* new arbitrarily goes to the right and tops the dup tree */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+
+ diff = cmp_bits(new->key, old->key, bit);
+ if (diff < 0) {
+ /* new->key < old->key, new takes the left */
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* new->key > old->key, new takes the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+ if (bit >= 0 && (bit < old_node_bit || old_node_bit < 0))
+ bit = string_equal_bits(new->key, old->key, bit);
+
+ if (unlikely(bit < 0)) {
+ /* Perfect match, we must only stop on head of dup tree
+ * or walk down to a leaf.
+ */
+ if (old_node_bit < 0) {
+ /* We know here that string_equal_bits matched all
+ * bits and that we're on top of a dup tree, then
+ * we can perform the dup insertion and return.
+ */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebpt_node, node);
+ }
+ /* OK so let's walk down */
+ }
+ else if (bit < old_node_bit || old_node_bit < 0) {
+ /* The tree did not contain the key, or we stopped on top of a dup
+ * tree, possibly containing the key. In the former case, we insert
+ * <new> before the node <old>, and set ->bit to designate the lowest
+ * bit position in <new> which applies to ->branches.b[]. In the latter
+ * case, we add the key to the existing dup tree. Note that we cannot
+ * enter here if we match an intermediate node's key that is not the
+ * head of a dup tree.
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ /* we can never match all bits here */
+ diff = cmp_bits(new->key, old->key, bit);
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (((unsigned char *)new->key)[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * This number of bits is already in <bit>.
+ * NOTE: we cannot get here with bit < 0, since that case would have
+ * been caught as a duplicate above.
+ */
+ new->node.bit = bit;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EBISTREE_H */
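
A minimal usage sketch for this header (not part of the patch): it assumes the ebis_insert()/ebis_lookup() entry points this header is expected to declare (the inline __ebis_* variants above behave the same), the <import/ebistree.h> include path used inside HAProxy, and hypothetical struct/variable names. The ebpt_node's <key> simply points to a zero-terminated string stored elsewhere, which is what the "indirect string" variant is for.

#include <stdio.h>
#include <import/ebistree.h>

/* hypothetical entry: node.key will point to <name> */
struct dict_entry {
    struct ebpt_node node;
    const char *name;
};

int main(void)
{
    struct eb_root dict = EB_ROOT_UNIQUE;   /* refuse duplicate keys */
    struct dict_entry e1 = { .name = "alpha" };
    struct dict_entry e2 = { .name = "beta" };
    struct ebpt_node *found;

    e1.node.key = (void *)e1.name;          /* key points to the string; lookups compare contents */
    e2.node.key = (void *)e2.name;

    ebis_insert(&dict, &e1.node);           /* returns &e1.node: the key was free */
    ebis_insert(&dict, &e2.node);

    found = ebis_lookup(&dict, "beta");
    if (found)
        printf("found %s\n", ebpt_entry(found, struct dict_entry, node)->name);
    return 0;
}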
diff --git a/include/import/ebmbtree.h b/include/import/ebmbtree.h
new file mode 100644
index 0000000..365042e
--- /dev/null
+++ b/include/import/ebmbtree.h
@@ -0,0 +1,850 @@
+/*
+ * Elastic Binary Trees - macros and structures for Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBMBTREE_H
+#define _EBMBTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define ebmb_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_first(struct eb_root *root)
+{
+ return ebmb_entry(eb_first(root), struct ebmb_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_last(struct eb_root *root)
+{
+ return ebmb_entry(eb_last(root), struct ebmb_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_next(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_next(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_prev(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_prev(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebmb_node *ebmb_next_dup(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_next_dup(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebmb_node *ebmb_prev_dup(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_prev_dup(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebmb_node *ebmb_next_unique(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_next_unique(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebmb_node *ebmb_prev_unique(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_prev_unique(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static forceinline void ebmb_delete(struct ebmb_node *ebmb)
+{
+ eb_delete(&ebmb->node);
+}
+
+/* The following functions are not inlined by default. They are declared
+ * in ebmbtree.c, which simply relies on their inline version.
+ */
+struct ebmb_node *ebmb_lookup(struct eb_root *root, const void *x, unsigned int len);
+struct ebmb_node *ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len);
+struct ebmb_node *ebmb_lookup_longest(struct eb_root *root, const void *x);
+struct ebmb_node *ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx);
+struct ebmb_node *ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len);
+
+/* start from a valid leaf and find the next matching prefix that's either a
+ * duplicate, or immediately shorter than the node's current one and still
+ * matches it. The purpose is to permit a caller that is not satisfied with a
+ * result provided by ebmb_lookup_longest() to evaluate the next matching
+ * entry. Given that shorter keys are necessarily attached to nodes located
+ * above the current one, it's sufficient to restart from the current leaf and
+ * go up until we find a shorter prefix, or a non-matching one.
+ */
+static inline struct ebmb_node *ebmb_lookup_shorter(struct ebmb_node *start)
+{
+ eb_troot_t *t = start->node.leaf_p;
+ struct ebmb_node *node;
+
+ /* first, check for duplicates */
+ node = ebmb_next_dup(start);
+ if (node)
+ return node;
+
+ while (1) {
+ if (eb_gettag(t) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ node = container_of(eb_root_to_node(eb_untag(t, EB_LEFT)), struct ebmb_node, node);
+ } else {
+ /* Walking up from right branch, so we cannot be below
+ * root. However, if we end up on a node with an even
+ * and positive bit, this is a cover node, which mandates
+ * that the left branch only contains cover values, so we
+ * must descend it.
+ */
+ node = container_of(eb_root_to_node(eb_untag(t, EB_RGHT)), struct ebmb_node, node);
+ if (node->node.bit > 0 && !(node->node.bit & 1))
+ return ebmb_entry(eb_walk_down(t, EB_LEFT), struct ebmb_node, node);
+ }
+
+ /* Note that <t> cannot be NULL at this stage */
+ t = node->node.node_p;
+
+ /* this is a node attached to a deeper (and possibly different)
+ * leaf, not interesting for us.
+ */
+ if (node->node.pfx >= start->node.pfx)
+ continue;
+
+ if (check_bits(start->key, node->key, 0, node->node.pfx) == 0)
+ break;
+ }
+ return node;
+}
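
As an illustration of the loop this enables (a sketch with hypothetical tree and callback names): after a longest-prefix lookup, every shorter prefix still covering the same key can be visited by chaining calls to this function.

/* Visit all prefixes covering <addr>, from longest to shortest, in a prefix
 * tree built with ebmb_insert_prefix(). <visit> is a hypothetical callback
 * receiving the key bytes and the prefix length in bits.
 */
static void visit_covering_prefixes(struct eb_root *root, const unsigned char *addr,
                                    void (*visit)(const unsigned char *key, unsigned int pfx))
{
    struct ebmb_node *node = ebmb_lookup_longest(root, addr);

    while (node) {
        visit(node->key, node->node.pfx);
        node = ebmb_lookup_shorter(node);
    }
}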
+
+/* The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __ebmb_delete(struct ebmb_node *ebmb)
+{
+ __eb_delete(&ebmb->node);
+}
+
+/* Find the first occurrence of a key of at least <len> bytes matching <x> in the
+ * tree <root>. The caller is responsible for ensuring that <len> will not exceed
+ * the common parts between the tree's keys and <x>. In case of multiple matches,
+ * the leftmost node is returned. This means that this function can be used to
+ * look up string keys by prefix if all keys in the tree are zero-terminated. If
+ * no match is found, NULL is returned. If <len> is zero, the first node is returned.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ goto ret_null;
+
+ if (unlikely(len == 0))
+ goto walk_down;
+
+ pos = 0;
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto ret_node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto walk_left;
+ }
+
+ /* OK, normal data node, let's walk down. We check if all full
+ * bytes are equal, and we start from the last one we did not
+ * completely check. We stop as soon as we reach the last byte,
+ * because we must decide to go left/right or abort.
+ */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This surprising construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ if (node->key[pos++] ^ *(unsigned char*)(x++))
+ goto ret_null; /* more than one full byte is different */
+ if (--len == 0)
+ goto walk_left; /* return first node if all bytes matched */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so node_bit < 8.
+ * We have 2 possibilities :
+ * - more than the last bit differs => return NULL
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((node->key[pos] >> node_bit) ^ side) > 1)
+ goto ret_null;
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+ walk_left:
+ troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ ret_node:
+ return node;
+ ret_null:
+ return NULL;
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes. It is absolutely mandatory that this length
+ * is the same for all keys in the tree. This function cannot be used to
+ * insert strings.
+ */
+static forceinline struct ebmb_node *
+__ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+ struct ebmb_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ /* insert above a leaf */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ goto check_bit_and_break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ if (unlikely(old->node.bit < 0)) {
+ /* We're above a duplicate tree, so we must compare the whole value */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ check_bit_and_break:
+ bit = equal_bits(new->key, old->key, bit, len << 3);
+ break;
+ }
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+
+ bit = equal_bits(new->key, old->key, bit, old_node_bit);
+ if (unlikely(bit < old_node_bit)) {
+ /* The tree did not contain the key, so we insert <new> before the
+ * node <old>, and set ->bit to designate the lowest bit position in
+ * <new> which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ break;
+ }
+ /* we don't want to skip bits for further comparisons, so we must limit <bit>.
+ * However, since we're going down around <old_node_bit>, we know it will be
+ * properly matched, so we can skip this bit.
+ */
+ bit = old_node_bit + 1;
+
+ /* walk down */
+ root = &old->node.branches;
+ side = old_node_bit & 7;
+ side ^= 7;
+ side = (new->key[old_node_bit >> 3] >> side) & 1;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ new->node.bit = bit;
+
+ /* Note: we can compare more bits than the current node's because as
+ * long as they are identical, we know we descend along the correct
+ * side. However we don't want to start to compare past the end.
+ */
+ diff = 0;
+ if (((unsigned)bit >> 3) < len)
+ diff = cmp_bits(new->key, old->key, bit);
+
+ if (diff == 0) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (likely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebmb_node, node);
+ }
+ /* otherwise fall through */
+ }
+
+ if (diff >= 0) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
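
A small usage sketch for fixed-size keys (hypothetical names; the <import/ebmbtree.h> include path is the one used inside HAProxy): the key bytes are stored immediately after the ebmb_node so that its zero-sized key[] member aliases them, and the same <len> is passed to every insert and lookup on that tree.

#include <string.h>
#include <import/ebmbtree.h>

/* hypothetical record keyed by a fixed 4-byte identifier */
struct record {
    struct ebmb_node node;   /* node.key[] aliases <id> below */
    unsigned char id[4];
};

struct eb_root records = EB_ROOT;    /* EB_ROOT_UNIQUE would refuse duplicate ids */

void record_add(struct record *r, const unsigned char id[4])
{
    memcpy(r->id, id, 4);
    ebmb_insert(&records, &r->node, 4);   /* <len> must be 4 for every key in this tree */
}

struct record *record_find(const unsigned char id[4])
{
    struct ebmb_node *n = ebmb_lookup(&records, id, 4);

    return n ? ebmb_entry(n, struct record, node) : NULL;
}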
+
+
+/* Find the first occurrence of the longest prefix matching a key <x> in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. Note that this can be ensured by
+ * having a byte at the end of <x> which cannot be part of any prefix, typically
+ * the trailing zero for a string. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup_longest(struct eb_root *root, const void *x)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot, *cover;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ cover = NULL;
+ pos = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ goto not_found;
+
+ return node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ goto not_found;
+
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ return node;
+ }
+
+ node_bit >>= 1; /* strip cover bit */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This uncommon construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ x++; pos++;
+ if (node->key[pos-1] ^ *(unsigned char*)(x-1))
+ goto not_found; /* more than one full byte is different */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so 0 <= node_bit <= 7.
+ * We have 2 possibilities :
+ * - more than the last bit differs => data does not match
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((node->key[pos] >> node_bit) ^ side) > 1)
+ goto not_found;
+
+ if (!(node->node.bit & 1)) {
+ /* This is a cover node, let's keep a reference to it
+ * for later. The covering subtree is on the left, and
+ * the covered subtree is on the right, so we have to
+ * walk down right.
+ */
+ cover = node->node.branches.b[EB_LEFT];
+ troot = node->node.branches.b[EB_RGHT];
+ continue;
+ }
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+
+ not_found:
+ /* Walk down last cover tree if it exists. It does not matter if cover is NULL */
+ return ebmb_entry(eb_walk_down(cover, EB_LEFT), struct ebmb_node, node);
+}
+
+
+/* Find the first occurrence of a prefix matching a key <x> of <pfx> BITS in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. Note that this can be ensured by
+ * having a byte at the end of <x> which cannot be part of any prefix, typically
+ * the trailing zero for a string. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ pos = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (node->node.pfx != pfx)
+ return NULL;
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ return NULL;
+ return node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (node->node.pfx != pfx)
+ return NULL;
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ return NULL;
+
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ return node;
+ }
+
+ node_bit >>= 1; /* strip cover bit */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This uncommon construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ x++; pos++;
+ if (node->key[pos-1] ^ *(unsigned char*)(x-1))
+ return NULL; /* more than one full byte is different */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so 0 <= node_bit <= 7.
+ * We have 2 possibilities :
+ * - more than the last bit differs => data does not match
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((node->key[pos] >> node_bit) ^ side) > 1)
+ return NULL;
+
+ if (!(node->node.bit & 1)) {
+ /* This is a cover node, it may be the entry we're
+ * looking for. We already know that it matches all the
+ * bits, let's compare prefixes and descend the cover
+ * subtree if they match.
+ */
+ if ((unsigned short)node->node.bit >> 1 == pfx)
+ troot = node->node.branches.b[EB_LEFT];
+ else
+ troot = node->node.branches.b[EB_RGHT];
+ continue;
+ }
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+}
+
+
+/* Insert ebmb_node <new> into a prefix subtree starting at node root <root>.
+ * Only new->key and new->pfx need be set with the key and its prefix length.
+ * Note that bits between <pfx> and <len> are theoretically ignored and should be
+ * zero, as it is not certain yet that they will always be ignored everywhere
+ * (eg in bit compare functions).
+ * The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+static forceinline struct ebmb_node *
+__ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+ struct ebmb_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+ unsigned int npfx = new->node.pfx;
+ unsigned int npfx1 = npfx << 1;
+ const unsigned char *nkey = new->key;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ len <<= 3;
+ if (len > npfx)
+ len = npfx;
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ /* Insert above a leaf. Note that this leaf could very
+ * well be part of a cover node.
+ */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ goto check_bit_and_break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+ old_node_bit = old->node.bit;
+ /* Note that old_node_bit can be :
+ * < 0 : dup tree
+ * = 2N : cover node for N bits
+ * = 2N+1 : normal node at N bits
+ */
+
+ if (unlikely(old_node_bit < 0)) {
+ /* We're above a duplicate tree, so we must compare the whole value */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ check_bit_and_break:
+ /* No need to compare everything if the leaves are shorter than the new one. */
+ if (len > old->node.pfx)
+ len = old->node.pfx;
+ bit = equal_bits(nkey, old->key, bit, len);
+ break;
+ }
+
+ /* WARNING: for the two blocks below, <bit> is counted in half-bits */
+
+ bit = equal_bits(nkey, old->key, bit, old_node_bit >> 1);
+ bit = (bit << 1) + 1; // assume comparisons with normal nodes
+
+ /* we must always check that our prefix is larger than the nodes
+ * we visit, otherwise we have to stop going down. The following
+ * test is able to stop before both normal and cover nodes.
+ */
+ if (bit >= npfx1 && npfx1 < old_node_bit) {
+ /* insert cover node here on the left */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ new->node.bit = npfx1;
+ diff = -1;
+ goto insert_above;
+ }
+
+ if (unlikely(bit < old_node_bit)) {
+ /* The tree did not contain the key, so we insert <new> before the
+ * node <old>, and set ->bit to designate the lowest bit position in
+ * <new> which applies to ->branches.b[]. We know that the bit is not
+ * greater than the prefix length thanks to the test above.
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ new->node.bit = bit;
+ diff = cmp_bits(nkey, old->key, bit >> 1);
+ goto insert_above;
+ }
+
+ if (!(old_node_bit & 1)) {
+ /* if we encounter a cover node with our exact prefix length, it's
+ * necessarily the same value, so we insert there as a duplicate on
+ * the left. For that, we go down on the left and the leaf detection
+ * code will finish the job.
+ */
+ if (npfx1 == old_node_bit) {
+ root = &old->node.branches;
+ side = EB_LEFT;
+ troot = root->b[side];
+ continue;
+ }
+
+ /* cover nodes are always walked through on the right */
+ side = EB_RGHT;
+ bit = old_node_bit >> 1; /* recheck that bit */
+ root = &old->node.branches;
+ troot = root->b[side];
+ continue;
+ }
+
+ /* we don't want to skip bits for further comparisons, so we must limit <bit>.
+ * However, since we're going down around <old_node_bit>, we know it will be
+ * properly matched, so we can skip this bit.
+ */
+ old_node_bit >>= 1;
+ bit = old_node_bit + 1;
+
+ /* walk down */
+ root = &old->node.branches;
+ side = old_node_bit & 7;
+ side ^= 7;
+ side = (nkey[old_node_bit >> 3] >> side) & 1;
+ troot = root->b[side];
+ }
+
+ /* Right here, we have 4 possibilities :
+ * - the tree does not contain any leaf matching the
+ * key, and we have new->key < old->key. We insert
+ * new above old, on the left ;
+ *
+ * - the tree does not contain any leaf matching the
+ * key, and we have new->key > old->key. We insert
+ * new above old, on the right ;
+ *
+ * - the tree does contain the key with the same prefix
+ * length. We add the new key next to it as a first
+ * duplicate (since it was alone).
+ *
+ * The last two cases can easily be partially merged.
+ *
+ * - the tree contains a leaf matching the key, we have
+ * to insert above it as a cover node. The leaf with
+ * the shortest prefix becomes the left subtree and
+ * the leaf with the longest prefix becomes the right
+ * one. The cover node gets the min of both prefixes
+ * as its new bit.
+ */
+
+ /* first we want to ensure that we compare the correct bit, which means
+ * the largest common to both nodes.
+ */
+ if (bit > npfx)
+ bit = npfx;
+ if (bit > old->node.pfx)
+ bit = old->node.pfx;
+
+ new->node.bit = (bit << 1) + 1; /* assume normal node by default */
+
+ /* if one prefix is included in the second one, we don't compare bits
+ * because they won't necessarily match, we just proceed with a cover
+ * node insertion.
+ */
+ diff = 0;
+ if (bit < old->node.pfx && bit < npfx)
+ diff = cmp_bits(nkey, old->key, bit);
+
+ if (diff == 0) {
+ /* Both keys match. Either it's a duplicate entry or we have to
+ * put the shortest prefix left and the largest one right below
+ * a new cover node. By default, diff==0 means we'll be inserted
+ * on the right.
+ */
+ new->node.bit--; /* anticipate cover node insertion */
+ if (npfx == old->node.pfx) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (unlikely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebmb_node, node);
+ }
+ /* otherwise fall through to insert first duplicate */
+ }
+ /* otherwise we just rely on the tests below to select the right side */
+ else if (npfx < old->node.pfx)
+ diff = -1; /* force insertion to left side */
+ }
+
+ insert_above:
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ if (diff >= 0) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+
+
+#endif /* _EBMBTREE_H */
+
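
A usage sketch for the prefix variant (hypothetical routing-table names): each entry carries a 4-byte network address plus a prefix length in bits, and lookups return the most specific covering entry; shorter covering prefixes can then be reached with ebmb_lookup_shorter() as shown earlier.

#include <string.h>
#include <import/ebmbtree.h>

/* hypothetical IPv4 route entry */
struct route {
    struct ebmb_node node;     /* node.key[] aliases <net> below */
    unsigned char net[4];      /* bits beyond the prefix should be zero */
};

struct eb_root routes = EB_ROOT;   /* EB_ROOT_UNIQUE would refuse duplicate prefixes */

void route_add(struct route *r, const unsigned char net[4], unsigned int pfx_bits)
{
    memcpy(r->net, net, 4);
    r->node.node.pfx = pfx_bits;              /* prefix length in bits */
    ebmb_insert_prefix(&routes, &r->node, 4); /* key length in bytes */
}

struct route *route_best_match(const unsigned char addr[4])
{
    struct ebmb_node *n = ebmb_lookup_longest(&routes, addr);

    return n ? ebmb_entry(n, struct route, node) : NULL;
}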
diff --git a/include/import/ebpttree.h b/include/import/ebpttree.h
new file mode 100644
index 0000000..64816a2
--- /dev/null
+++ b/include/import/ebpttree.h
@@ -0,0 +1,156 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on pointer nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBPTTREE_H
+#define _EBPTTREE_H
+
+#include "ebtree.h"
+#include "eb32tree.h"
+#include "eb64tree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define ebpt_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_first(struct eb_root *root)
+{
+ return ebpt_entry(eb_first(root), struct ebpt_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_last(struct eb_root *root)
+{
+ return ebpt_entry(eb_last(root), struct ebpt_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_next(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_next(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_prev(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_prev(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebpt_node *ebpt_next_dup(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_next_dup(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebpt_node *ebpt_prev_dup(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_prev_dup(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebpt_node *ebpt_next_unique(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_next_unique(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebpt_node *ebpt_prev_unique(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_prev_unique(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static forceinline void ebpt_delete(struct ebpt_node *ebpt)
+{
+ eb_delete(&ebpt->node);
+}
+
+/*
+ * The following functions are inlined but derived from the integer versions.
+ */
+static forceinline struct ebpt_node *ebpt_lookup(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_lookup(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)eb64_lookup(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *ebpt_lookup_le(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_lookup_le(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)eb64_lookup_le(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *ebpt_lookup_ge(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_lookup_ge(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)eb64_lookup_ge(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_insert(root, (struct eb32_node *)new);
+ else
+ return (struct ebpt_node *)eb64_insert(root, (struct eb64_node *)new);
+}
+
+/*
+ * The following functions are less likely to be used directly, because
+ * their code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __ebpt_delete(struct ebpt_node *ebpt)
+{
+ __eb_delete(&ebpt->node);
+}
+
+static forceinline struct ebpt_node *__ebpt_lookup(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)__eb32_lookup(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)__eb64_lookup(root, (u64)(PTR_INT_TYPE)x);
+}
+
+static forceinline struct ebpt_node *__ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)__eb32_insert(root, (struct eb32_node *)new);
+ else
+ return (struct ebpt_node *)__eb64_insert(root, (struct eb64_node *)new);
+}
+
+#endif /* _EBPT_TREE_H */
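
A usage sketch (hypothetical names): the pointer value itself is the key, so this header is convenient for indexing objects by address; internally the operations map onto the 32- or 64-bit integer trees depending on the pointer size.

#include <import/ebpttree.h>

/* hypothetical reference keyed by an object's address */
struct obj_ref {
    struct ebpt_node node;   /* node.key stores the object's address */
};

struct eb_root by_addr = EB_ROOT;   /* EB_ROOT_UNIQUE keeps a single ref per object */

void obj_ref_add(struct obj_ref *ref, void *obj)
{
    ref->node.key = obj;
    ebpt_insert(&by_addr, &ref->node);
}

struct obj_ref *obj_ref_find(void *obj)
{
    struct ebpt_node *n = ebpt_lookup(&by_addr, obj);

    return n ? ebpt_entry(n, struct obj_ref, node) : NULL;
}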
diff --git a/include/import/ebsttree.h b/include/import/ebsttree.h
new file mode 100644
index 0000000..db2267b
--- /dev/null
+++ b/include/import/ebsttree.h
@@ -0,0 +1,324 @@
+/*
+ * Elastic Binary Trees - macros to manipulate String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* These functions and macros rely on Multi-Byte nodes */
+
+#ifndef _EBSTTREE_H
+#define _EBSTTREE_H
+
+#include "ebtree.h"
+#include "ebmbtree.h"
+
+/* The following functions are not inlined by default. They are declared
+ * in ebsttree.c, which simply relies on their inline version.
+ */
+struct ebmb_node *ebst_lookup(struct eb_root *root, const char *x);
+struct ebmb_node *ebst_insert(struct eb_root *root, struct ebmb_node *new);
+
+/* Find the first occurrence of a length <len> string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings, and that no null character is present
+ * in string <x> in the first <len> chars. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *
+ebst_lookup_len(struct eb_root *root, const char *x, unsigned int len)
+{
+ struct ebmb_node *node;
+
+ node = ebmb_lookup(root, x, len);
+ if (!node || node->key[len] != 0)
+ return NULL;
+ return node;
+}
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebst_lookup(struct eb_root *root, const void *x)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot;
+ int bit;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ bit = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (strcmp((char *)node->key, x) == 0)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+ node_bit = node->node.bit;
+
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (strcmp((char *)node->key, x) != 0)
+ return NULL;
+
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ return node;
+ }
+
+ /* OK, normal data node, let's walk down but don't compare data
+ * if we already reached the end of the key.
+ */
+ if (likely(bit >= 0)) {
+ bit = string_equal_bits(x, node->key, bit);
+ if (likely(bit < node_bit)) {
+ if (bit >= 0)
+ return NULL; /* no more common bits */
+
+ /* bit < 0 : we reached the end of the key. If we
+ * are in a tree with unique keys, we can return
+ * this node. Otherwise we have to walk it down
+ * and stop comparing bits.
+ */
+ if (eb_gettag(root->b[EB_RGHT]))
+ return node;
+ }
+ /* if the bit is larger than the node's, we must bound it
+ * because we might have compared too many bytes with an
+ * inappropriate leaf. As a test case, build a tree from "0",
+ * "WW", "W", "S" inserted in this exact sequence, then look up
+ * "W": without this assignment, "S" is returned.
+ */
+ else
+ bit = node_bit;
+ }
+
+ troot = node->node.branches.b[(((unsigned char*)x)[node_bit >> 3] >>
+ (~node_bit & 7)) & 1];
+ }
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebmb_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+static forceinline struct ebmb_node *
+__ebst_insert(struct eb_root *root, struct ebmb_node *new)
+{
+ struct ebmb_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ * - the tree does not contain the key, and we have
+ * new->key < old->key. We insert new above old, on
+ * the left ;
+ *
+ * - the tree does not contain the key, and we have
+ * new->key > old->key. We insert new above old, on
+ * the right ;
+ *
+ * - the tree does contain the key, which implies it
+ * is alone. We add the new key next to it as a
+ * first duplicate.
+ *
+ * The last two cases can easily be partially merged.
+ */
+ if (bit >= 0)
+ bit = string_equal_bits(new->key, old->key, bit);
+
+ if (bit < 0) {
+ /* key was already there */
+
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if (eb_gettag(root_right))
+ return old;
+
+ /* new arbitrarily goes to the right and tops the dup tree */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+
+ diff = cmp_bits(new->key, old->key, bit);
+ if (diff < 0) {
+ /* new->key < old->key, new takes the left */
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* new->key > old->key, new takes the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+ if (bit >= 0 && (bit < old_node_bit || old_node_bit < 0))
+ bit = string_equal_bits(new->key, old->key, bit);
+
+ if (unlikely(bit < 0)) {
+ /* Perfect match, we must only stop on head of dup tree
+ * or walk down to a leaf.
+ */
+ if (old_node_bit < 0) {
+ /* We know here that string_equal_bits matched all
+ * bits and that we're on top of a dup tree, then
+ * we can perform the dup insertion and return.
+ */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebmb_node, node);
+ }
+ /* OK so let's walk down */
+ }
+ else if (bit < old_node_bit || old_node_bit < 0) {
+ /* The tree did not contain the key, or we stopped on top of a dup
+ * tree, possibly containing the key. In the former case, we insert
+ * <new> before the node <old>, and set ->bit to designate the lowest
+ * bit position in <new> which applies to ->branches.b[]. In the latter
+ * case, we add the key to the existing dup tree. Note that we cannot
+ * enter here if we match an intermediate node's key that is not the
+ * head of a dup tree.
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ /* we can never match all bits here */
+ diff = cmp_bits(new->key, old->key, bit);
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (new->key[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * This number of bits is already in <bit>.
+ * NOTE: we cannot get here with bit < 0, since that case would have
+ * been caught as a duplicate above.
+ */
+ new->node.bit = bit;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EBSTTREE_H */
+
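
A minimal usage sketch (hypothetical names; <import/ebsttree.h> is the include path used inside HAProxy): the zero-terminated key is stored right after the ebmb_node so that its key[] member aliases it; ebst_insert()/ebst_lookup() then operate on whole strings and ebst_lookup_len() on a length-delimited buffer.

#include <stdio.h>
#include <import/ebsttree.h>

/* hypothetical entry: the zero-terminated key lives right after the node */
struct named {
    struct ebmb_node node;   /* node.key[] aliases <name> below */
    char name[32];
};

int main(void)
{
    struct eb_root names = EB_ROOT;   /* duplicates allowed; use EB_ROOT_UNIQUE otherwise */
    struct named a = { .name = "alpha" };
    struct named b = { .name = "beta" };
    struct ebmb_node *n;

    ebst_insert(&names, &a.node);
    ebst_insert(&names, &b.node);

    n = ebst_lookup(&names, "beta");
    if (n)
        printf("found %s\n", ebmb_entry(n, struct named, node)->name);

    /* ebst_lookup_len() matches the first <len> chars of a possibly longer
     * buffer against a key of exactly that length, here "alpha".
     */
    n = ebst_lookup_len(&names, "alphabet", 5);
    return n ? 0 : 1;
}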
diff --git a/include/import/ebtree-t.h b/include/import/ebtree-t.h
new file mode 100644
index 0000000..b695426
--- /dev/null
+++ b/include/import/ebtree-t.h
@@ -0,0 +1,217 @@
+/*
+ * Elastic Binary Trees - types
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBTREE_T_H
+#define _EBTREE_T_H
+
+#include <haproxy/api-t.h>
+
+/*
+ * generic types for ebtree
+ */
+
+/* Number of bits per node, and number of leaves per node */
+#define EB_NODE_BITS 1
+#define EB_NODE_BRANCHES (1 << EB_NODE_BITS)
+#define EB_NODE_BRANCH_MASK (EB_NODE_BRANCHES - 1)
+
+/* Be careful not to tweak those values. The walking code is optimized for NULL
+ * detection on the assumption that the following values are intact.
+ */
+#define EB_LEFT 0
+#define EB_RGHT 1
+#define EB_LEAF 0
+#define EB_NODE 1
+
+/* Tags to set in root->b[EB_RGHT] :
+ * - EB_NORMAL is a normal tree which stores duplicate keys.
+ * - EB_UNIQUE is a tree which stores unique keys.
+ */
+#define EB_NORMAL 0
+#define EB_UNIQUE 1
+
+/* This is the same as an eb_node pointer, except that the lower bit embeds
+ * a tag. See eb_dotag()/eb_untag()/eb_gettag(). This tag has two meanings :
+ * - 0=left, 1=right to designate the parent's branch for leaf_p/node_p
+ * - 0=link, 1=leaf to designate the branch's type for branch[]
+ */
+typedef void eb_troot_t;
+
+/* The eb_root connects the node which contains it, to two nodes below it, one
+ * of which may be the same node. At the top of the tree, we use an eb_root
+ * too, which always has its right branch NULL (aside from the low-order tag bit).
+ */
+struct eb_root {
+ eb_troot_t *b[EB_NODE_BRANCHES]; /* left and right branches */
+};
+
+/* The eb_node contains the two parts, one for the leaf, which always exists,
+ * and one for the node, which remains unused in the very first node inserted
+ * into the tree. This structure is 20 bytes per node on 32-bit machines. Do
+ * not change the order, benchmarks have shown that it's optimal this way.
+ * Note: be careful about this struct's alignment if it gets included into
+ * another struct and some atomic ops are expected on the keys or the node.
+ */
+struct eb_node {
+ struct eb_root branches; /* branches, must be at the beginning */
+ eb_troot_t *node_p; /* link node's parent */
+ eb_troot_t *leaf_p; /* leaf node's parent */
+ short int bit; /* link's bit position. */
+ short unsigned int pfx; /* data prefix length, always related to leaf */
+} __attribute__((packed));
+
+
+/* The root of a tree is an eb_root initialized with both pointers NULL.
+ * During its life, only the left pointer will change. The right one will
+ * always remain NULL, which is the way we detect it.
+ */
+#define EB_ROOT \
+ (struct eb_root) { \
+ .b = {[0] = NULL, [1] = NULL }, \
+ }
+
+#define EB_ROOT_UNIQUE \
+ (struct eb_root) { \
+ .b = {[0] = NULL, [1] = (void *)1 }, \
+ }
+
+#define EB_TREE_HEAD(name) \
+ struct eb_root name = EB_ROOT
+
+
+/*
+ * types for eb32tree
+ */
+
+#define EB32_ROOT EB_ROOT
+#define EB32_TREE_HEAD EB_TREE_HEAD
+
+/* These types may sometimes already be defined */
+typedef unsigned int u32;
+typedef signed int s32;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ */
+struct eb32_node {
+ struct eb_node node; /* the tree node, must be at the beginning */
+ MAYBE_ALIGN(sizeof(u32));
+ u32 key;
+} ALIGNED(sizeof(void*));
+
+/* This structure carries a node, a leaf, a scope, and a key. It must start
+ * with the eb_node so that it can be cast into an eb_node. We could also
+ * have put some sort of transparent union here to reduce the indirection
+ * level, but the fact is, the end user is not meant to manipulate internals,
+ * so this is pointless.
+ * In case sizeof(void*)>=sizeof(long), we know there will be some padding after
+ * the leaf if it's unaligned. In this case we force the alignment on void* so
+ * that we prefer to have the padding before for more efficient accesses.
+ */
+struct eb32sc_node {
+ struct eb_node node; /* the tree node, must be at the beginning */
+ MAYBE_ALIGN(sizeof(u32));
+ u32 key;
+ ALWAYS_ALIGN(sizeof(void*));
+ unsigned long node_s; /* visibility of this node's branches */
+ unsigned long leaf_s; /* visibility of this node's leaf */
+} ALIGNED(sizeof(void*));
+
+/*
+ * types for eb64tree
+ */
+
+#define EB64_ROOT EB_ROOT
+#define EB64_TREE_HEAD EB_TREE_HEAD
+
+/* These types may sometimes already be defined */
+typedef unsigned long long u64;
+typedef signed long long s64;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * In case sizeof(void*)>=sizeof(u64), we know there will be some padding after
+ * the key if it's unaligned. In this case we force the alignment on void* so
+ * that we prefer to have the padding before for more efficient accesses.
+ */
+struct eb64_node {
+ struct eb_node node; /* the tree node, must be at the beginning */
+ MAYBE_ALIGN(sizeof(u64));
+ ALWAYS_ALIGN(sizeof(void*));
+ u64 key;
+} ALIGNED(sizeof(void*));
+
+#define EBPT_ROOT EB_ROOT
+#define EBPT_TREE_HEAD EB_TREE_HEAD
+
+/* on *almost* all platforms, a pointer can be cast into a size_t which is unsigned */
+#ifndef PTR_INT_TYPE
+#define PTR_INT_TYPE size_t
+#endif
+
+/*
+ * types for ebpttree
+ */
+
+typedef PTR_INT_TYPE ptr_t;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * Internally, it is automatically cast as an eb32_node or eb64_node.
+ * We always align the key since the struct itself will be padded to the same
+ * size anyway.
+ */
+struct ebpt_node {
+ struct eb_node node; /* the tree node, must be at the beginning */
+ ALWAYS_ALIGN(sizeof(void*));
+ void *key;
+} ALIGNED(sizeof(void*));
+
+/*
+ * types for ebmbtree
+ */
+
+#define EBMB_ROOT EB_ROOT
+#define EBMB_TREE_HEAD EB_TREE_HEAD
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * The 'node.bit' value here works differently from scalar types, as it contains
+ * the number of identical bits between the two branches.
+ * Note that we take a great care of making sure the key is located exactly at
+ * the end of the struct even if that involves holes before it, so that it
+ * always aliases any external key a user would append after. This is why the
+ * key uses the same alignment as the struct.
+ */
+struct ebmb_node {
+ struct eb_node node; /* the tree node, must be at the beginning */
+ ALWAYS_ALIGN(sizeof(void*));
+ unsigned char key[0]; /* the key, its size depends on the application */
+} ALIGNED(sizeof(void*));
+
+#endif /* _EB_TREE_T_H */
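
These types are meant to be embedded at the start of user structures. A small sketch with the 32-bit variant (hypothetical names, and assuming the eb32_insert()/eb32_lookup_ge() entry points declared by eb32tree.h, which the pointer-tree wrappers above map onto):

#include <import/eb32tree.h>

/* hypothetical timer queue: the 32-bit key is the expiration tick; duplicate
 * keys are allowed, so several timers may share the same tick.
 */
struct timer {
    struct eb32_node node;   /* node.key = expiration tick */
    void (*cb)(struct timer *t);
};

struct eb_root wait_queue = EB_ROOT;

void timer_queue(struct timer *t, u32 expire)
{
    t->node.key = expire;
    eb32_insert(&wait_queue, &t->node);
}

/* first timer expiring at or after <now>, if any */
struct timer *timer_next(u32 now)
{
    struct eb32_node *n = eb32_lookup_ge(&wait_queue, now);

    return n ? container_of(n, struct timer, node) : NULL;
}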
diff --git a/include/import/ebtree.h b/include/import/ebtree.h
new file mode 100644
index 0000000..d6e51d5
--- /dev/null
+++ b/include/import/ebtree.h
@@ -0,0 +1,857 @@
+/*
+ * Elastic Binary Trees - generic macros and structures.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+
+/*
+ General idea:
+ -------------
+ In a radix binary tree, we may have up to 2N-1 nodes for N keys if all of
+ them are leaves. If we find a way to differentiate intermediate nodes (later
+ called "nodes") and final nodes (later called "leaves"), and we associate
+ them by two, it is possible to build sort of a self-contained radix tree with
+ intermediate nodes always present. It will not be as cheap as the ultree for
+ optimal cases as shown below, but the optimal case almost never happens :
+
+ Eg, to store 8, 10, 12, 13, 14 :
+
+ ultree this theoretical tree
+
+ 8 8
+ / \ / \
+ 10 12 10 12
+ / \ / \
+ 13 14 12 14
+ / \
+ 12 13
+
+  Note that on real-world tests (with a scheduler), it was verified that the
+ case with data on an intermediate node never happens. This is because the
+ data spectrum is too large for such coincidences to happen. It would require
+ for instance that a task has its expiration time at an exact second, with
+ other tasks sharing that second. This is too rare to try to optimize for it.
+
+ What is interesting is that the node will only be added above the leaf when
+ necessary, which implies that it will always remain somewhere above it. So
+ both the leaf and the node can share the exact value of the leaf, because
+ when going down the node, the bit mask will be applied to comparisons. So we
+ are tempted to have one single key shared between the node and the leaf.
+
+ The bit only serves the nodes, and the dups only serve the leaves. So we can
+ put a lot of information in common. This results in one single entity with
+ two branch pointers and two parent pointers, one for the node part, and one
+ for the leaf part :
+
+ node's leaf's
+ parent parent
+ | |
+ [node] [leaf]
+ / \
+ left right
+ branch branch
+
+ The node may very well refer to its leaf counterpart in one of its branches,
+ indicating that its own leaf is just below it :
+
+ node's
+ parent
+ |
+ [node]
+ / \
+ left [leaf]
+ branch
+
+ Adding keys in such a tree simply consists in inserting nodes between
+ other nodes and/or leaves :
+
+ [root]
+ |
+ [node2]
+ / \
+ [leaf1] [node3]
+ / \
+ [leaf2] [leaf3]
+
+ On this diagram, we notice that [node2] and [leaf2] have been pulled away
+ from each other due to the insertion of [node3], just as if there would be
+ an elastic between both parts. This elastic-like behaviour gave its name to
+ the tree : "Elastic Binary Tree", or "EBtree". The entity which associates a
+ node part and a leaf part will be called an "EB node".
+
+ We also notice on the diagram that there is a root entity required to attach
+ the tree. It only contains two branches and there is nothing above it. This
+ is an "EB root". Some will note that [leaf1] has no [node1]. One property of
+ the EBtree is that all nodes have their branches filled, and that if a node
+ has only one branch, it does not need to exist. Here, [leaf1] was added
+ below [root] and did not need any node.
+
+ An EB node contains :
+ - a pointer to the node's parent (node_p)
+ - a pointer to the leaf's parent (leaf_p)
+ - two branches pointing to lower nodes or leaves (branches)
+ - a bit position (bit)
+ - an optional key.
+
+ The key here is optional because it's used only during insertion, in order
+ to classify the nodes. Nothing else in the tree structure requires knowledge
+ of the key. This makes it possible to write type-agnostic primitives for
+ everything, and type-specific insertion primitives. This has led to consider
+ two types of EB nodes. The type-agnostic ones will serve as a header for the
+ other ones, and will simply be called "struct eb_node". The other ones will
+ have their type indicated in the structure name. Eg: "struct eb32_node" for
+ nodes carrying 32 bit keys.
+
+  We will also note that the two branches in a node serve exactly the same
+ purpose as an EB root. For this reason, a "struct eb_root" will be used as
+ well inside the struct eb_node. In order to ease pointer manipulation and
+ ROOT detection when walking upwards, all the pointers inside an eb_node will
+ point to the eb_root part of the referenced EB nodes, relying on the same
+ principle as the linked lists in Linux.
+
+ Another important point to note, is that when walking inside a tree, it is
+ very convenient to know where a node is attached in its parent, and what
+ type of branch it has below it (leaf or node). In order to simplify the
+ operations and to speed up the processing, it was decided in this specific
+ implementation to use the lowest bit from the pointer to designate the side
+ of the upper pointers (left/right) and the type of a branch (leaf/node).
+  This practice is not mandatory by design, but is an implementation-specific
+ optimisation permitted on all platforms on which data must be aligned. All
+ known 32 bit platforms align their integers and pointers to 32 bits, leaving
+ the two lower bits unused. So, we say that the pointers are "tagged". And
+ since they designate pointers to root parts, we simply call them
+ "tagged root pointers", or "eb_troot" in the code.
+
+ Duplicate keys are stored in a special manner. When inserting a key, if
+ the same one is found, then an incremental binary tree is built at this
+ place from these keys. This ensures that no special case has to be written
+ to handle duplicates when walking through the tree or when deleting entries.
+ It also guarantees that duplicates will be walked in the exact same order
+ they were inserted. This is very important when trying to achieve fair
+ processing distribution for instance.
+
+ Algorithmic complexity can be derived from 3 variables :
+ - the number of possible different keys in the tree : P
+ - the number of entries in the tree : N
+ - the number of duplicates for one key : D
+
+ Note that this tree is deliberately NOT balanced. For this reason, the worst
+ case may happen with a small tree (eg: 32 distinct keys of one bit). BUT,
+  the operations required to manage such data are so cheap that they make it
+  worth using even under such conditions. For instance, a balanced tree
+  may require only 6 levels to store those 32 keys while this tree will
+  require 32. But if per-level operations are 5 times cheaper, it wins.
+
+ Minimal, Maximal and Average times are specified in number of operations.
+ Minimal is given for best condition, Maximal for worst condition, and the
+ average is reported for a tree containing random keys. An operation
+ generally consists in jumping from one node to the other.
+
+ Complexity :
+ - lookup : min=1, max=log(P), avg=log(N)
+ - insertion from root : min=1, max=log(P), avg=log(N)
+ - insertion of dups : min=1, max=log(D), avg=log(D)/2 after lookup
+ - deletion : min=1, max=1, avg=1
+ - prev/next : min=1, max=log(P), avg=2 :
+ N/2 nodes need 1 hop => 1*N/2
+ N/4 nodes need 2 hops => 2*N/4
+ N/8 nodes need 3 hops => 3*N/8
+ ...
+ N/x nodes need log(x) hops => log2(x)*N/x
+          Total cost for all N nodes : sum[k=1..log2(N)](k*N/2^k) ~= 2*N
+          Average cost across N nodes = total / N = sum[k=1..log2(N)](k/2^k) ~= 2
+
+ This design is currently limited to only two branches per node. Most of the
+ tree descent algorithm would be compatible with more branches (eg: 4, to cut
+ the height in half), but this would probably require more complex operations
+ and the deletion algorithm would be problematic.
+
+ Useful properties :
+ - a node is always added above the leaf it is tied to, and never can get
+ below nor in another branch. This implies that leaves directly attached
+ to the root do not use their node part, which is indicated by a NULL
+ value in node_p. This also enhances the cache efficiency when walking
+ down the tree, because when the leaf is reached, its node part will
+ already have been visited (unless it's the first leaf in the tree).
+
+ - pointers to lower nodes or leaves are stored in "branch" pointers. Only
+ the root node may have a NULL in either branch, it is not possible for
+ other branches. Since the nodes are attached to the left branch of the
+ root, it is not possible to see a NULL left branch when walking up a
+ tree. Thus, an empty tree is immediately identified by a NULL left
+ branch at the root. Conversely, the one and only way to identify the
+    root node is to check that its right branch is NULL. Note that the
+ NULL pointer may have a few low-order bits set.
+
+ - a node connected to its own leaf will have branch[0|1] pointing to
+ itself, and leaf_p pointing to itself.
+
+ - a node can never have node_p pointing to itself.
+
+ - a node is linked in a tree if and only if it has a non-null leaf_p.
+
+ - a node can never have both branches equal, except for the root which can
+ have them both NULL.
+
+ - deletion only applies to leaves. When a leaf is deleted, its parent must
+ be released too (unless it's the root), and its sibling must attach to
+ the grand-parent, replacing the parent. Also, when a leaf is deleted,
+ the node tied to this leaf will be removed and must be released too. If
+ this node is different from the leaf's parent, the freshly released
+ leaf's parent will be used to replace the node which must go. A released
+ node will never be used anymore, so there's no point in tracking it.
+
+ - the bit index in a node indicates the bit position in the key which is
+ represented by the branches. That means that a node with (bit == 0) is
+ just above two leaves. Negative bit values are used to build a duplicate
+ tree. The first node above two identical leaves gets (bit == -1). This
+ value logarithmically decreases as the duplicate tree grows. During
+ duplicate insertion, a node is inserted above the highest bit value (the
+ lowest absolute value) in the tree during the right-sided walk. If bit
+    -1 is not encountered (highest < -1), we insert above the last leaf.
+ Otherwise, we insert above the node with the highest value which was not
+ equal to the one of its parent + 1.
+
+ - the "eb_next" primitive walks from left to right, which means from lower
+ to higher keys. It returns duplicates in the order they were inserted.
+ The "eb_first" primitive returns the left-most entry.
+
+ - the "eb_prev" primitive walks from right to left, which means from
+ higher to lower keys. It returns duplicates in the opposite order they
+ were inserted. The "eb_last" primitive returns the right-most entry.
+
+ - a tree which has 1 in the lower bit of its root's right branch is a
+    tree with unique nodes. This means that a node whose key already
+    exists in the tree will not be inserted, and the previous entry
+    will be returned instead.
+
+ */
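+
+/* Illustration only: a minimal usage sketch of the concepts above. It assumes
+ * the typed eb32_* primitives declared in eb32tree.h (eb32_insert, eb32_first,
+ * eb32_entry) and the EB_ROOT initializer from ebtree-t.h; the structure and
+ * field names are hypothetical. Keys come back in ascending order, duplicates
+ * in insertion order:
+ *
+ *    struct mytask {
+ *            struct eb32_node wait;       // node part, embeds the 32-bit key
+ *            // ... user data ...
+ *    };
+ *
+ *    struct eb_root wait_queue = EB_ROOT; // empty tree: both branches NULL
+ *
+ *    void enqueue(struct mytask *t, unsigned int expire)
+ *    {
+ *            t->wait.key = expire;
+ *            eb32_insert(&wait_queue, &t->wait);
+ *    }
+ *
+ *    struct mytask *next_expiring(void)
+ *    {
+ *            struct eb32_node *n = eb32_first(&wait_queue);
+ *            return n ? eb32_entry(n, struct mytask, wait) : NULL;
+ *    }
+ */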
+
+#ifndef _EBTREE_H
+#define _EBTREE_H
+
+#include <stdlib.h>
+#include <import/ebtree-t.h>
+#include <haproxy/api.h>
+
+static inline int flsnz8_generic(unsigned int x)
+{
+ int ret = 0;
+ if (x >> 4) { x >>= 4; ret += 4; }
+ return ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1;
+}
+
+/* Note: we never need to run fls on null keys, so we can optimize the fls
+ * function by removing a conditional jump.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+/* this code is similar on 32 and 64 bit */
+static inline int flsnz(int x)
+{
+ int r;
+ __asm__("bsrl %1,%0\n"
+ : "=r" (r) : "rm" (x));
+ return r+1;
+}
+
+static inline int flsnz8(unsigned char x)
+{
+ int r;
+ __asm__("movzbl %%al, %%eax\n"
+ "bsrl %%eax,%0\n"
+ : "=r" (r) : "a" (x));
+ return r+1;
+}
+
+#else
+// returns 1 to 32 for 1<<0 to 1<<31. Undefined for 0.
+#define flsnz(___a) ({ \
+ register int ___x, ___bits = 0; \
+ ___x = (___a); \
+ if (___x & 0xffff0000) { ___x &= 0xffff0000; ___bits += 16;} \
+ if (___x & 0xff00ff00) { ___x &= 0xff00ff00; ___bits += 8;} \
+ if (___x & 0xf0f0f0f0) { ___x &= 0xf0f0f0f0; ___bits += 4;} \
+ if (___x & 0xcccccccc) { ___x &= 0xcccccccc; ___bits += 2;} \
+ if (___x & 0xaaaaaaaa) { ___x &= 0xaaaaaaaa; ___bits += 1;} \
+ ___bits + 1; \
+ })
+
+static inline int flsnz8(unsigned int x)
+{
+ return flsnz8_generic(x);
+}
+
+
+#endif
+
+static inline int fls64(unsigned long long x)
+{
+ unsigned int h;
+ unsigned int bits = 32;
+
+ h = x >> 32;
+ if (!h) {
+ h = x;
+ bits = 0;
+ }
+ return flsnz(h) + bits;
+}
+
+#define fls_auto(x) ((sizeof(x) > 4) ? fls64(x) : flsnz(x))
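+
+/* A few sanity examples of the helpers above (all are undefined for 0):
+ * flsnz(0x1) == 1, flsnz(0x80000000) == 32, flsnz8(0x10) == 5,
+ * fls64(1ULL) == 1, fls64(0x100000000ULL) == 33.
+ */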
+
+/* Linux-like "container_of". It returns a pointer to the structure of type
+ * <type> which has its member <name> stored at address <ptr>.
+ */
+#ifndef container_of
+#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
+#endif
+
+/* returns a pointer to the structure of type <type> which has its member <name>
+ * stored at address <ptr>, unless <ptr> is 0, in which case 0 is returned.
+ */
+#ifndef container_of_safe
+#define container_of_safe(ptr, type, name) \
+ ({ void *__p = (ptr); \
+ __p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
+ })
+#endif
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb_entry(ptr, type, member) container_of(ptr, type, member)
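+
+/* Illustration only (hypothetical names): eb_entry()/container_of() recover
+ * the user structure embedding an eb node from a pointer to that node:
+ *
+ *    struct item {
+ *            struct eb64_node by_key;
+ *            int payload;
+ *    };
+ *
+ *    struct eb64_node *n = ...;           // e.g. returned by a lookup
+ *    struct item *it = eb_entry(n, struct item, by_key);
+ */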
+
+/***************************************\
+ * Private functions. Not for end-user *
+\***************************************/
+
+/* Converts a root pointer to its equivalent eb_troot_t pointer,
+ * ready to be stored in ->branch[], leaf_p or node_p. NULL is not
+ * conserved. To be used with EB_LEAF, EB_NODE, EB_LEFT or EB_RGHT in <tag>.
+ */
+static inline eb_troot_t *eb_dotag(const struct eb_root *root, const int tag)
+{
+ return (eb_troot_t *)((void *)root + tag);
+}
+
+/* Converts an eb_troot_t pointer pointer to its equivalent eb_root pointer,
+ * for use with pointers from ->branch[], leaf_p or node_p. NULL is conserved
+ * as long as the tree is not corrupted. To be used with EB_LEAF, EB_NODE,
+ * EB_LEFT or EB_RGHT in <tag>.
+ */
+static inline struct eb_root *eb_untag(const eb_troot_t *troot, const int tag)
+{
+ return (struct eb_root *)((void *)troot - tag);
+}
+
+/* returns the tag associated with an eb_troot_t pointer */
+static inline int eb_gettag(eb_troot_t *troot)
+{
+ return (unsigned long)troot & 1;
+}
+
+/* Converts a root pointer to its equivalent eb_troot_t pointer and clears the
+ * tag, no matter what its value was.
+ */
+static inline struct eb_root *eb_clrtag(const eb_troot_t *troot)
+{
+ return (struct eb_root *)((unsigned long)troot & ~1UL);
+}
+
+/* Returns a pointer to the eb_node holding <root> */
+static inline struct eb_node *eb_root_to_node(struct eb_root *root)
+{
+ return container_of(root, struct eb_node, branches);
+}
+
+/* Walks down starting at root pointer <start>, and always walking on side
+ * <side>. It either returns the node hosting the first leaf on that side,
+ * or NULL if no leaf is found. <start> may either be NULL or a branch pointer.
+ * The pointer to the leaf (or NULL) is returned.
+ */
+static inline struct eb_node *eb_walk_down(eb_troot_t *start, unsigned int side)
+{
+ /* A NULL pointer on an empty tree root will be returned as-is */
+ while (eb_gettag(start) == EB_NODE)
+ start = (eb_untag(start, EB_NODE))->b[side];
+ /* NULL is left untouched (root==eb_node, EB_LEAF==0) */
+ return eb_root_to_node(eb_untag(start, EB_LEAF));
+}
+
+/* This function is used to build a tree of duplicates by adding a new node to
+ * a subtree of at least 2 entries. It will probably never be needed inlined,
+ * and it is not for end-user.
+ */
+static forceinline struct eb_node *
+__eb_insert_dup(struct eb_node *sub, struct eb_node *new)
+{
+ struct eb_node *head = sub;
+
+ eb_troot_t *new_left = eb_dotag(&new->branches, EB_LEFT);
+ eb_troot_t *new_rght = eb_dotag(&new->branches, EB_RGHT);
+ eb_troot_t *new_leaf = eb_dotag(&new->branches, EB_LEAF);
+
+ /* first, identify the deepest hole on the right branch */
+ while (eb_gettag(head->branches.b[EB_RGHT]) != EB_LEAF) {
+ struct eb_node *last = head;
+ head = container_of(eb_untag(head->branches.b[EB_RGHT], EB_NODE),
+ struct eb_node, branches);
+ if (head->bit > last->bit + 1)
+ sub = head; /* there's a hole here */
+ }
+
+ /* Here we have a leaf attached to (head)->b[EB_RGHT] */
+ if (head->bit < -1) {
+ /* A hole exists just before the leaf, we insert there */
+ new->bit = -1;
+ sub = container_of(eb_untag(head->branches.b[EB_RGHT], EB_LEAF),
+ struct eb_node, branches);
+ head->branches.b[EB_RGHT] = eb_dotag(&new->branches, EB_NODE);
+
+ new->node_p = sub->leaf_p;
+ new->leaf_p = new_rght;
+ sub->leaf_p = new_left;
+ new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_LEAF);
+ new->branches.b[EB_RGHT] = new_leaf;
+ return new;
+ } else {
+ int side;
+ /* No hole was found before a leaf. We have to insert above
+ * <sub>. Note that we cannot be certain that <sub> is attached
+ * to the right of its parent, as this is only true if <sub>
+ * is inside the dup tree, not at the head.
+ */
+ new->bit = sub->bit - 1; /* install at the lowest level */
+ side = eb_gettag(sub->node_p);
+ head = container_of(eb_untag(sub->node_p, side), struct eb_node, branches);
+ head->branches.b[side] = eb_dotag(&new->branches, EB_NODE);
+
+ new->node_p = sub->node_p;
+ new->leaf_p = new_rght;
+ sub->node_p = new_left;
+ new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_NODE);
+ new->branches.b[EB_RGHT] = new_leaf;
+ return new;
+ }
+}
+
+
+/**************************************\
+ * Public functions, for the end-user *
+\**************************************/
+
+/* Return non-zero if the tree is empty, otherwise zero */
+static inline int eb_is_empty(const struct eb_root *root)
+{
+ return !root->b[EB_LEFT];
+}
+
+/* Return non-zero if the node is a duplicate, otherwise zero */
+static inline int eb_is_dup(const struct eb_node *node)
+{
+ return node->bit < 0;
+}
+
+/* Return the first leaf in the tree starting at <root>, or NULL if none */
+static inline struct eb_node *eb_first(struct eb_root *root)
+{
+ return eb_walk_down(root->b[0], EB_LEFT);
+}
+
+/* Return the last leaf in the tree starting at <root>, or NULL if none */
+static inline struct eb_node *eb_last(struct eb_root *root)
+{
+ return eb_walk_down(root->b[0], EB_RGHT);
+}
+
+/* Return previous leaf node before an existing leaf node, or NULL if none. */
+static inline struct eb_node *eb_prev(struct eb_node *node)
+{
+ eb_troot_t *t = node->leaf_p;
+
+ while (eb_gettag(t) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
+ }
+ /* Note that <t> cannot be NULL at this stage */
+ t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
+ return eb_walk_down(t, EB_RGHT);
+}
+
+/* Return next leaf node after an existing leaf node, or NULL if none. */
+static inline struct eb_node *eb_next(struct eb_node *node)
+{
+ eb_troot_t *t = node->leaf_p;
+
+ while (eb_gettag(t) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
+
+ /* Note that <t> cannot be NULL at this stage */
+ t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(t) == NULL)
+ return NULL;
+ return eb_walk_down(t, EB_LEFT);
+}
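+
+/* Illustration only: a full in-order walk using the generic primitives above
+ * (ascending keys, duplicates in insertion order). Typed trees would normally
+ * use the equivalent typed walkers (eb32_first/eb32_next, ...):
+ *
+ *    struct eb_node *n;
+ *
+ *    for (n = eb_first(&root); n; n = eb_next(n)) {
+ *            // visit the leaf <n>, e.g. via eb_entry()
+ *    }
+ */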
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb_node *eb_prev_dup(struct eb_node *node)
+{
+ eb_troot_t *t = node->leaf_p;
+
+ while (eb_gettag(t) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ /* if the current node leaves a dup tree, quit */
+ if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0)
+ return NULL;
+ t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
+ }
+ /* Note that <t> cannot be NULL at this stage */
+ if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0)
+ return NULL;
+ t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
+ return eb_walk_down(t, EB_RGHT);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb_node *eb_next_dup(struct eb_node *node)
+{
+ eb_troot_t *t = node->leaf_p;
+
+ while (eb_gettag(t) != EB_LEFT) {
+ /* Walking up from right branch, so we cannot be below root */
+ /* if the current node leaves a dup tree, quit */
+ if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0)
+ return NULL;
+ t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
+ }
+
+ /* Note that <t> cannot be NULL at this stage. If our leaf is directly
+ * under the root, we must not try to cast the leaf_p into a eb_node*
+ * since it is a pointer to an eb_root.
+ */
+ if (eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL)
+ return NULL;
+
+ if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0)
+ return NULL;
+ t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
+ return eb_walk_down(t, EB_LEFT);
+}
+
+/* Return previous leaf node before an existing leaf node, skipping duplicates,
+ * or NULL if none. */
+static inline struct eb_node *eb_prev_unique(struct eb_node *node)
+{
+ eb_troot_t *t = node->leaf_p;
+
+ while (1) {
+ if (eb_gettag(t) != EB_LEFT) {
+ node = eb_root_to_node(eb_untag(t, EB_RGHT));
+ /* if we're right and not in duplicates, stop here */
+ if (node->bit >= 0)
+ break;
+ t = node->node_p;
+ }
+ else {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
+ }
+ }
+ /* Note that <t> cannot be NULL at this stage */
+ t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
+ return eb_walk_down(t, EB_RGHT);
+}
+
+/* Return next leaf node after an existing leaf node, skipping duplicates, or
+ * NULL if none.
+ */
+static inline struct eb_node *eb_next_unique(struct eb_node *node)
+{
+ eb_troot_t *t = node->leaf_p;
+
+ while (1) {
+ if (eb_gettag(t) == EB_LEFT) {
+ if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL; /* we reached root */
+ node = eb_root_to_node(eb_untag(t, EB_LEFT));
+ /* if we're left and not in duplicates, stop here */
+ if (node->bit >= 0)
+ break;
+ t = node->node_p;
+ }
+ else {
+ /* Walking up from right branch, so we cannot be below root */
+ t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
+ }
+ }
+
+ /* Note that <t> cannot be NULL at this stage */
+ t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(t) == NULL)
+ return NULL;
+ return eb_walk_down(t, EB_LEFT);
+}
+
+
+/* Removes a leaf node from the tree if it was still in it. Marks the node
+ * as unlinked.
+ */
+static forceinline void __eb_delete(struct eb_node *node)
+{
+ __label__ delete_unlink;
+ unsigned int pside, gpside, sibtype;
+ struct eb_node *parent;
+ struct eb_root *gparent;
+
+ if (!node->leaf_p)
+ return;
+
+ /* we need the parent, our side, and the grand parent */
+ pside = eb_gettag(node->leaf_p);
+ parent = eb_root_to_node(eb_untag(node->leaf_p, pside));
+
+ /* We likely have to release the parent link, unless it's the root,
+ * in which case we only set our branch to NULL. Note that we can
+ * only be attached to the root by its left branch.
+ */
+
+ if (eb_clrtag(parent->branches.b[EB_RGHT]) == NULL) {
+ /* we're just below the root, it's trivial. */
+ parent->branches.b[EB_LEFT] = NULL;
+ goto delete_unlink;
+ }
+
+ /* To release our parent, we have to identify our sibling, and reparent
+ * it directly to/from the grand parent. Note that the sibling can
+ * either be a link or a leaf.
+ */
+
+ gpside = eb_gettag(parent->node_p);
+ gparent = eb_untag(parent->node_p, gpside);
+
+ gparent->b[gpside] = parent->branches.b[!pside];
+ sibtype = eb_gettag(gparent->b[gpside]);
+
+ if (sibtype == EB_LEAF) {
+ eb_root_to_node(eb_untag(gparent->b[gpside], EB_LEAF))->leaf_p =
+ eb_dotag(gparent, gpside);
+ } else {
+ eb_root_to_node(eb_untag(gparent->b[gpside], EB_NODE))->node_p =
+ eb_dotag(gparent, gpside);
+ }
+ /* Mark the parent unused. Note that we do not check if the parent is
+ * our own node, but that's not a problem because if it is, it will be
+ * marked unused at the same time, which we'll use below to know we can
+ * safely remove it.
+ */
+ parent->node_p = NULL;
+
+ /* The parent node has been detached, and is currently unused. It may
+ * belong to another node, so we cannot remove it that way. Also, our
+	 * own node part might still be used, so we can use this spare node
+ * to replace ours if needed.
+ */
+
+ /* If our link part is unused, we can safely exit now */
+ if (!node->node_p)
+ goto delete_unlink;
+
+ /* From now on, <node> and <parent> are necessarily different, and the
+ * <node>'s node part is in use. By definition, <parent> is at least
+ * below <node>, so keeping its key for the bit string is OK.
+ */
+
+ parent->node_p = node->node_p;
+ parent->branches = node->branches;
+ parent->bit = node->bit;
+
+ /* We must now update the new node's parent... */
+ gpside = eb_gettag(parent->node_p);
+ gparent = eb_untag(parent->node_p, gpside);
+ gparent->b[gpside] = eb_dotag(&parent->branches, EB_NODE);
+
+ /* ... and its branches */
+ for (pside = 0; pside <= 1; pside++) {
+ if (eb_gettag(parent->branches.b[pside]) == EB_NODE) {
+ eb_root_to_node(eb_untag(parent->branches.b[pside], EB_NODE))->node_p =
+ eb_dotag(&parent->branches, pside);
+ } else {
+ eb_root_to_node(eb_untag(parent->branches.b[pside], EB_LEAF))->leaf_p =
+ eb_dotag(&parent->branches, pside);
+ }
+ }
+ delete_unlink:
+ /* Now the node has been completely unlinked */
+ node->leaf_p = NULL;
+ return; /* tree is not empty yet */
+}
+
+/* Compare blocks <a> and <b> byte-to-byte, from bit <ignore> to bit <len-1>.
+ * Return the number of equal bits between strings, assuming that the first
+ * <ignore> bits are already identical. It is possible to return slightly more
+ * than <len> bits if <len> does not stop on a byte boundary and we find exact
+ * bytes. Note that parts or all of <ignore> bits may be rechecked. It is only
+ * passed here as a hint to speed up the check.
+ */
+static forceinline int equal_bits(const unsigned char *a,
+ const unsigned char *b,
+ int ignore, int len)
+{
+ for (ignore >>= 3, a += ignore, b += ignore, ignore <<= 3;
+ ignore < len; ) {
+ unsigned char c;
+
+ a++; b++;
+ ignore += 8;
+ c = b[-1] ^ a[-1];
+
+ if (c) {
+			/* OK now we know that <a> and <b> differ at this byte and that <c> holds
+ * the bit differences. We have to find what bit is differing and report
+ * it as the number of identical bits. Note that low bit numbers are
+ * assigned to high positions in the byte, as we compare them as strings.
+ */
+ ignore -= flsnz8(c);
+ break;
+ }
+ }
+ return ignore;
+}
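+
+/* Worked example for equal_bits(): with a = "abc", b = "abd", ignore = 0 and
+ * len = 24, the first two bytes match, then 'c' ^ 'd' = 0x07 and
+ * flsnz8(0x07) = 3, so the function returns 24 - 3 = 21 identical bits.
+ */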
+
+/* check that the two blocks <a> and <b> are equal on <len> bits. If it is known
+ * they already are on some bytes, this number of equal bytes to be skipped may
+ * be passed in <skip>. It returns 0 if they match, otherwise non-zero.
+ */
+static forceinline int check_bits(const unsigned char *a,
+ const unsigned char *b,
+ int skip,
+ int len)
+{
+ int bit, ret;
+
+ /* This uncommon construction gives the best performance on x86 because
+	 * it makes heavy use of multiple-index addressing and parallel instructions,
+ * and it prevents gcc from reordering the loop since it is already
+ * properly oriented. Tested to be fine with 2.95 to 4.2.
+ */
+ bit = ~len + (skip << 3) + 9; // = (skip << 3) + (8 - len)
+ ret = a[skip] ^ b[skip];
+ if (unlikely(bit >= 0))
+ return ret >> bit;
+ while (1) {
+ skip++;
+ if (ret)
+ return ret;
+ ret = a[skip] ^ b[skip];
+ bit += 8;
+ if (bit >= 0)
+ return ret >> bit;
+ }
+}
+
+
+/* Compare strings <a> and <b> byte-to-byte, from bit <ignore> to the last 0.
+ * Return the number of equal bits between strings, assuming that the first
+ * <ignore> bits are already identical. Note that parts or all of <ignore> bits
+ * may be rechecked. It is only passed here as a hint to speed up the check.
+ * The caller is responsible for not passing an <ignore> value larger than any
+ * of the two strings. However, referencing any bit from the trailing zero is
+ * permitted. Equal strings are reported as a negative number of bits, which
+ * indicates the end was reached.
+ */
+static forceinline int string_equal_bits(const unsigned char *a,
+ const unsigned char *b,
+ int ignore)
+{
+ int beg;
+ unsigned char c;
+
+ beg = ignore >> 3;
+
+ /* skip known and identical bits. We stop at the first different byte
+ * or at the first zero we encounter on either side.
+ */
+ while (1) {
+ unsigned char d;
+
+ c = a[beg];
+ d = b[beg];
+ beg++;
+
+ c ^= d;
+ if (c)
+ break;
+ if (!d)
+ return -1;
+ }
+ /* OK now we know that a and b differ at byte <beg>, or that both are zero.
+ * We have to find what bit is differing and report it as the number of
+ * identical bits. Note that low bit numbers are assigned to high positions
+ * in the byte, as we compare them as strings.
+ */
+ return (beg << 3) - flsnz8(c);
+}
+
+static forceinline int cmp_bits(const unsigned char *a, const unsigned char *b, unsigned int pos)
+{
+ unsigned int ofs;
+ unsigned char bit_a, bit_b;
+
+ ofs = pos >> 3;
+ pos = ~pos & 7;
+
+ bit_a = (a[ofs] >> pos) & 1;
+ bit_b = (b[ofs] >> pos) & 1;
+
+ return bit_a - bit_b; /* -1: a<b; 0: a=b; 1: a>b */
+}
+
+static forceinline int get_bit(const unsigned char *a, unsigned int pos)
+{
+ unsigned int ofs;
+
+ ofs = pos >> 3;
+ pos = ~pos & 7;
+ return (a[ofs] >> pos) & 1;
+}
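+
+/* Worked example for get_bit()/cmp_bits(): bits are numbered from the MSB of
+ * the first byte, so with a = "A" (0x41 = 01000001b), get_bit(a, 0) == 0,
+ * get_bit(a, 1) == 1 and get_bit(a, 7) == 1. With b = "a" (0x61), the strings
+ * first differ at bit 2, where cmp_bits(a, b, 2) returns -1 (a < b).
+ */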
+
+/* These functions are declared in ebtree.c */
+void eb_delete(struct eb_node *node);
+struct eb_node *eb_insert_dup(struct eb_node *sub, struct eb_node *new);
+int eb_memcmp(const void *m1, const void *m2, size_t len);
+
+#endif /* _EB_TREE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/import/ist.h b/include/import/ist.h
new file mode 100644
index 0000000..16b8616
--- /dev/null
+++ b/include/import/ist.h
@@ -0,0 +1,957 @@
+/*
+ * include/import/ist.h
+ * Very simple indirect string manipulation functions.
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _IMPORT_IST_H
+#define _IMPORT_IST_H
+
+#include <sys/types.h>
+#include <ctype.h>
+#include <stddef.h>
+#include <string.h>
+
+#ifndef IST_FREESTANDING
+#include <stdlib.h>
+#endif
+
+/* ASCII to lower case conversion table */
+#define _IST_LC { \
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, \
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, \
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, \
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, \
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, \
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, \
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, \
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, \
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, \
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, \
+ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, \
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, \
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, \
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, \
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, \
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, \
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, \
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, \
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, \
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, \
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, \
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, \
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, \
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, \
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, \
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, \
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, \
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, \
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, \
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, \
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, \
+}
+
+/* ASCII to upper case conversion table */
+#define _IST_UC { \
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, \
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, \
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, \
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, \
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, \
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, \
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, \
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, \
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, \
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, \
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, \
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, \
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, \
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, \
+ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, \
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, \
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, \
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, \
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, \
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, \
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, \
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, \
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, \
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, \
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, \
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, \
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, \
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, \
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, \
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, \
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, \
+}
+
+#if defined(USE_OBSOLETE_LINKER) || defined(__TINYC__)
+/* some old linkers and some non-ELF platforms have issues with the weak
+ * attribute, so we turn these arrays into literals there. TCC silently
+ * ignores it, so we switch to literals as well.
+ */
+#define ist_lc ((const unsigned char[256])_IST_LC)
+#define ist_uc ((const unsigned char[256])_IST_UC)
+#else
+const unsigned char ist_lc[256] __attribute__((weak)) = _IST_LC;
+const unsigned char ist_uc[256] __attribute__((weak)) = _IST_UC;
+#endif
+
+/* This string definition will most often be used to represent a read-only
+ * string returned from a function, based on the starting point and its length
+ * in bytes. No storage is provided, only a pointer and a length. The types
+ * here are important as we only want to have 2 native machine words there so
+ * that on modern architectures the compiler is capable of efficiently
+ * returning a register pair without having to allocate stack room from the
+ * caller. This is done with -freg-struct-return, which is often enabled by default.
+ */
+struct ist {
+ char *ptr;
+ size_t len;
+};
+
+/* makes a constant ist from a constant string, for use in array declarations */
+#define IST(str) { .ptr = str "", .len = (sizeof str "") - 1 }
+
+/* IST_NULL is equivalent to an `ist` with `.ptr = NULL` and `.len = 0` */
+#define IST_NULL ((const struct ist){ .ptr = 0, .len = 0 })
+
+/* makes an ist from a regular zero terminated string. Null has length 0.
+ * Constants are detected and replaced with constant initializers. Other values
+ * are measured by hand without strlen() as it's much cheaper and inlinable on
+ * small strings. The construct is complex because we must never call
+ * __builtin_strlen() with an expression otherwise it involves a real
+ * measurement.
+ */
+#if __GNUC__ >= 4
+// gcc >= 4 detects constant propagation of str through __x and resolves the
+// length of constant strings easily.
+#define ist(str) ({ \
+ char *__x = (void *)(str); \
+ (struct ist){ \
+ .ptr = __x, \
+ .len = __builtin_constant_p(str) ? \
+ ((void *)str == (void *)0) ? 0 : \
+ __builtin_strlen(__x) : \
+ ({ \
+ size_t __l = 0; \
+ if (__x) for (__l--; __x[++__l]; ) ; \
+ __l; \
+ }) \
+ }; \
+})
+#else
+// gcc < 4 can't do this, and the side effect is a warning each time a NULL is
+// passed to ist() due to the check on __builtin_strlen(). It doesn't have the
+// ability to know that this code is never called.
+#define ist(str) ({ \
+ char *__x = (void *)(str); \
+ (struct ist){ \
+ .ptr = __x, \
+ .len = __builtin_constant_p(str) ? \
+ ((void *)str == (void *)0) ? 0 : \
+ __builtin_strlen(str) : \
+ ({ \
+ size_t __l = 0; \
+ if (__x) for (__l--; __x[++__l]; ) ; \
+ __l; \
+ }) \
+ }; \
+})
+#endif
+
+/* makes an ist struct from a string and a length */
+static inline struct ist ist2(const void *ptr, size_t len)
+{
+ return (struct ist){ .ptr = (char *)ptr, .len = len };
+}
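+
+/* Illustration only (hypothetical names): the usual ways to build an ist.
+ * None of them copies the bytes, they only wrap existing storage:
+ *
+ *    const struct ist methods[] = { IST("GET"), IST("POST") }; // constant initializers
+ *    struct ist h = ist("host");        // from a NUL-terminated string, len == 4
+ *    struct ist v = ist2(buf, buf_len); // from a pointer and a length
+ */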
+
+/* returns the result of `ist.ptr != NULL` */
+static inline int isttest(const struct ist ist)
+{
+ return ist.ptr != NULL;
+}
+
+/* This function MODIFIES the string to add a zero AFTER the end, and returns
+ * the start pointer. The purpose is to use it on strings extracted by parsers
+ * from larger strings cut with delimiters that are not important and can be
+ * destroyed. It allows any such string to be used with regular string
+ * functions. It's also convenient to use with printf() to show data extracted
+ * from writable areas. The caller is obviously responsible for ensuring that
+ * the string is valid and that the first byte past the end is writable. If
+ * these conditions cannot be satisfied, use istpad() below instead.
+ */
+static inline char *ist0(struct ist ist)
+{
+ ist.ptr[ist.len] = 0;
+ return ist.ptr;
+}
+
+/* returns the pointer of the string */
+static inline char *istptr(const struct ist ist)
+{
+ return ist.ptr;
+}
+
+/* returns the length of the string */
+static inline size_t istlen(const struct ist ist)
+{
+ return ist.len;
+}
+
+/* returns the pointer to the end of the string */
+static inline char *istend(const struct ist ist)
+{
+ return (ist.ptr + ist.len);
+}
+
+/* skips to next character in the string, always stops at the end */
+static inline struct ist istnext(const struct ist ist)
+{
+ struct ist ret = ist;
+
+ if (ret.len) {
+ ret.len--;
+ ret.ptr++;
+ }
+ return ret;
+}
+
+/* Returns the first character of the <ist> and advances the <ist> by 1.
+ * If the <ist> is empty the result is undefined.
+ */
+static inline char istshift(struct ist *ist)
+{
+ if (ist->len) {
+ char c = *ist->ptr;
+ *ist = istnext(*ist);
+
+ return c;
+ }
+
+ return 0;
+}
+
+/* copies the contents from string <ist> to buffer <buf> and adds a trailing
+ * zero. The caller must ensure <buf> is large enough.
+ */
+static inline struct ist istpad(void *buf, const struct ist ist)
+{
+ struct ist ret = { .ptr = buf, .len = ist.len };
+
+ for (ret.len = 0; ret.len < ist.len; ret.len++)
+ ret.ptr[ret.len] = ist.ptr[ret.len];
+
+ ret.ptr[ret.len] = 0;
+ return ret;
+}
+
+/* trims string <ist> to no more than <size> characters. The string is
+ * returned.
+ */
+static inline struct ist isttrim(const struct ist ist, size_t size)
+{
+ struct ist ret = ist;
+
+ if (ret.len > size)
+ ret.len = size;
+ return ret;
+}
+
+/* Sets the <len> of the <ist> to zero and returns the previous length.
+ *
+ * This function is meant to be used in functions that receive an ist containing
+ * the destination buffer and the buffer's size. The returned size must be stored
+ * to prevent an overflow of such a destination buffer.
+ *
+ * If you simply want to clear an ist and do not care about the previous length
+ * then you should use `isttrim(ist, 0)`.
+ *
+ * Example Usage (fill the complete buffer with 'x'):
+ *
+ * void my_func(struct ist* dst)
+ * {
+ * size_t dst_size = istclear(dst);
+ * size_t i;
+ *
+ * for (i = 0; i < dst_size; i++)
+ * *dst = __istappend(*dst, 'x');
+ * }
+ */
+__attribute__((warn_unused_result))
+static inline size_t istclear(struct ist* ist)
+{
+ size_t len = ist->len;
+
+ ist->len = 0;
+
+ return len;
+}
+
+/* trims string <ist> to no more than <size>-1 characters and ensures that a
+ * zero is placed after <ist.len> (possibly reduced by one) and before <size>,
+ * unless <size> is already zero. The string is returned. This is mostly aimed
+ * at building printable strings that need to be zero-terminated.
+ */
+static inline struct ist istzero(const struct ist ist, size_t size)
+{
+ struct ist ret = ist;
+
+ if (!size)
+ ret.len = 0;
+ else {
+ if (ret.len > size - 1)
+ ret.len = size - 1;
+ ret.ptr[ret.len] = 0;
+ }
+ return ret;
+}
+
+/* returns the ordinal difference between two strings :
+ * < 0 if ist1 < ist2
+ * = 0 if ist1 == ist2
+ * > 0 if ist1 > ist2
+ */
+static inline int istdiff(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ do {
+ if (!l.len--)
+ return -r.len;
+ if (!r.len--)
+ return 1;
+ } while (*l.ptr++ == *r.ptr++);
+
+ return *(unsigned char *)(l.ptr - 1) - *(unsigned char *)(r.ptr - 1);
+}
+
+/* returns non-zero if <ist1> starts like <ist2> (empty strings do match) */
+static inline int istmatch(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len < r.len)
+ return 0;
+
+ while (r.len--) {
+ if (*l.ptr++ != *r.ptr++)
+ return 0;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> starts like <ist2>, ignoring the case (empty strings do match) */
+static inline int istmatchi(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len < r.len)
+ return 0;
+
+ while (r.len--) {
+ if (*l.ptr != *r.ptr &&
+ ist_lc[(unsigned char)*l.ptr] != ist_lc[(unsigned char)*r.ptr])
+ return 0;
+
+ l.ptr++;
+ r.ptr++;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> starts like <ist2> on the first <count>
+ * characters (empty strings do match).
+ */
+static inline int istnmatch(const struct ist ist1, const struct ist ist2, size_t count)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len > count)
+ l.len = count;
+ if (r.len > count)
+ r.len = count;
+ return istmatch(l, r);
+}
+
+/* returns non-zero if <ist1> equals <ist2> (empty strings are equal) */
+static inline int isteq(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len != r.len)
+ return 0;
+
+ while (l.len--) {
+ if (*l.ptr++ != *r.ptr++)
+ return 0;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> equals <ist2>, ignoring the case (empty strings are equal) */
+static inline int isteqi(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len != r.len)
+ return 0;
+
+ while (l.len--) {
+ if (*l.ptr != *r.ptr &&
+ ist_lc[(unsigned char)*l.ptr] != ist_lc[(unsigned char)*r.ptr])
+ return 0;
+
+ l.ptr++;
+ r.ptr++;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> equals <ist2> on the first <count> characters
+ * (empty strings are equal).
+ */
+static inline int istneq(const struct ist ist1, const struct ist ist2, size_t count)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len > count)
+ l.len = count;
+ if (r.len > count)
+ r.len = count;
+ return isteq(l, r);
+}
+
+/* appends <src> after <dst>. The caller must ensure that the underlying buffer
+ * is large enough to fit the character.
+ */
+static inline struct ist __istappend(struct ist dst, const char src)
+{
+ dst.ptr[dst.len++] = src;
+
+ return dst;
+}
+
+/* copies <src> over <dst> for a maximum of <count> bytes. Returns the number
+ * of characters copied (src.len), or -1 if it does not fit. In all cases, the
+ * contents are copied prior to reporting an error, so that the destination
+ * at least contains a valid but truncated string.
+ */
+static inline ssize_t istcpy(struct ist *dst, const struct ist src, size_t count)
+{
+ dst->len = 0;
+
+ if (count > src.len)
+ count = src.len;
+
+ while (dst->len < count) {
+ dst->ptr[dst->len] = src.ptr[dst->len];
+ dst->len++;
+ }
+
+ if (dst->len == src.len)
+ return src.len;
+
+ return -1;
+}
+
+/* copies <src> over <dst> for a maximum of <count> bytes. Returns the number
+ * of characters copied, or -1 if it does not fit. A (possibly truncated) valid
+ * copy of <src> is always left into <dst>, and a trailing \0 is appended as
+ * long as <count> is not null, even if that results in reducing the string by
+ * one character.
+ */
+static inline ssize_t istscpy(struct ist *dst, const struct ist src, size_t count)
+{
+ dst->len = 0;
+
+ if (!count)
+ goto fail;
+
+ if (count > src.len)
+ count = src.len + 1;
+
+ while (dst->len < count - 1) {
+ dst->ptr[dst->len] = src.ptr[dst->len];
+ dst->len++;
+ }
+
+ dst->ptr[dst->len] = 0;
+ if (dst->len == src.len)
+ return src.len;
+ fail:
+ return -1;
+}
+
+/* appends <src> after <dst> for a maximum of <count> total bytes in <dst> after
+ * the copy. <dst> is assumed to be <count> or less before the call. The new
+ * string's length is returned, or -1 if a truncation happened. In all cases,
+ * the contents are copied prior to reporting an error, so that the destination
+ * at least contains a valid but truncated string.
+ */
+static inline ssize_t istcat(struct ist *dst, const struct ist src, size_t count)
+{
+ const char *s = src.ptr;
+
+ while (dst->len < count && s != src.ptr + src.len)
+ dst->ptr[dst->len++] = *s++;
+
+ if (s == src.ptr + src.len)
+ return dst->len;
+
+ return -1;
+}
+
+/* appends <src> after <dst> for a maximum of <count> total bytes in <dst> after
+ * the copy. <dst> is assumed to be <count> or less before the call. The new
+ * string's length is returned, or -1 if a truncation happened. In all cases,
+ * the contents are copied prior to reporting an error, so that the destination
+ * at least contains a valid but truncated string.
+ */
+static inline ssize_t istscat(struct ist *dst, const struct ist src, size_t count)
+{
+ const char *s = src.ptr;
+
+ if (!count)
+ goto fail;
+
+ while (dst->len < count - 1 && s != src.ptr + src.len) {
+ dst->ptr[dst->len++] = *s++;
+ }
+
+ dst->ptr[dst->len] = 0;
+ if (s == src.ptr + src.len)
+ return dst->len;
+ fail:
+ return -1;
+}
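+
+/* Illustration only (hypothetical buffer): building a bounded string from
+ * pieces with istscat(). A return value of -1 means the output was truncated,
+ * but it is still a valid, zero-terminated string:
+ *
+ *    char buf[16];
+ *    struct ist out = ist2(buf, 0);
+ *
+ *    istscat(&out, ist("Host: "), sizeof(buf));      // returns 6
+ *    istscat(&out, ist("example.org"), sizeof(buf)); // returns -1, truncated
+ */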
+
+/* copies the entire <src> over <dst>, which must be allocated large enough to
+ * hold the whole contents. No trailing zero is appended, this is mainly used
+ * for protocol processing where the frame length has already been checked. An
+ * ist made of the output and its length are returned. The destination is not
+ * touched if src.len is null.
+ */
+static inline struct ist ist2bin(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = src.ptr[ofs];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ return ist2(dst, ofs);
+}
+
+/* copies the entire <src> over <dst>, which must be allocated large enough to
+ * hold the whole contents as well as a trailing zero which is always appended.
+ * This is mainly used for protocol conversions where the frame length has
+ * already been checked. An ist made of the output and its length (not counting
+ * the trailing zero) are returned.
+ */
+static inline struct ist ist2str(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = src.ptr[ofs];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ dst[ofs] = 0;
+ return ist2(dst, ofs);
+}
+
+/* makes a lower case copy of the entire <src> into <dst>, which must have been
+ * allocated large enough to hold the whole contents. No trailing zero is
+ * appended, this is mainly used for protocol processing where the frame length
+ * has already been checked. An ist made of the output and its length are
+ * returned. The destination is not touched if src.len is null.
+ */
+static inline struct ist ist2bin_lc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = ist_lc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ return ist2(dst, ofs);
+}
+
+/* makes a lower case copy of the entire <src> into <dst>, which must have been
+ * allocated large enough to hold the whole contents as well as a trailing zero
+ * which is always appended. This is mainly used for protocol conversions where
+ * the frame length has already been checked. An ist made of the output and its
+ * length (not counting the trailing zero) are returned.
+ */
+static inline struct ist ist2str_lc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = ist_lc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ dst[ofs] = 0;
+ return ist2(dst, ofs);
+}
+
+/* makes an upper case copy of the entire <src> into <dst>, which must have
+ * been allocated large enough to hold the whole contents. No trailing zero is
+ * appended, this is mainly used for protocol processing where the frame length
+ * has already been checked. An ist made of the output and its length are
+ * returned. The destination is not touched if src.len is null.
+ */
+static inline struct ist ist2bin_uc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = ist_uc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ return ist2(dst, ofs);
+}
+
+/* makes an upper case copy of the entire <src> into <dst>, which must have been
+ * allocated large enough to hold the whole contents as well as a trailing zero
+ * which is always appended. This is mainly used for protocol conversions where
+ * the frame length has already been checked. An ist made of the output and its
+ * length (not counting the trailing zero) are returned.
+ */
+static inline struct ist ist2str_uc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = ist_uc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ dst[ofs] = 0;
+ return ist2(dst, ofs);
+}
+
+/* looks for first occurrence of character <chr> in string <ist>. Returns the
+ * pointer if found, or NULL if not found.
+ */
+static inline char *istchr(const struct ist ist, char chr)
+{
+ char *s = ist.ptr;
+
+ do {
+ if (s >= ist.ptr + ist.len)
+ return NULL;
+ } while (*s++ != chr);
+ return s - 1;
+}
+
+/* Returns a pointer to the first control character found in <ist>, or NULL if
+ * none is present. A control character is defined as a byte whose value is
+ * between 0x00 and 0x1F included. The function is optimized for strings having
+ * no CTL chars by processing up to sizeof(long) bytes at once on architectures
+ * supporting efficient unaligned accesses. Despite this it is not very fast
+ * (~0.43 byte/cycle) and should mostly be used on low match probability when
+ * it can save a call to a much slower function.
+ */
+static inline const char *ist_find_ctl(const struct ist ist)
+{
+ const union { unsigned long v; } __attribute__((packed)) *u;
+ const char *curr = (void *)ist.ptr - sizeof(long);
+ const char *last = curr + ist.len;
+ unsigned long l1, l2;
+
+ do {
+ curr += sizeof(long);
+ if (curr > last)
+ break;
+ u = (void *)curr;
+		/* subtract 0x202020...20 from the value to generate a carry in
+ * the lower byte if the byte contains a lower value. If we
+ * generate a bit 7 that was not there, it means the byte was
+ * within 0x00..0x1F.
+ */
+ l2 = u->v;
+ l1 = ~l2 & ((~0UL / 255) * 0x80); /* 0x808080...80 */
+ l2 -= (~0UL / 255) * 0x20; /* 0x202020...20 */
+ } while ((l1 & l2) == 0);
+
+ last += sizeof(long);
+ if (__builtin_expect(curr < last, 0)) {
+ do {
+ if ((unsigned char)*curr < 0x20)
+ return curr;
+ curr++;
+ } while (curr < last);
+ }
+ return NULL;
+}
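+
+/* Worked example of the test above on isolated bytes: 0x1F gives
+ * ~0x1F & 0x80 = 0x80 and 0x1F - 0x20 = 0xFF, so bit 7 appears and the word
+ * is flagged; 0x41 gives 0x41 - 0x20 = 0x21 with bit 7 clear, so it is not;
+ * bytes >= 0x80 are never flagged since ~b & 0x80 is 0. Any flagged word is
+ * then re-checked byte by byte by the trailing loop.
+ */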
+
+/* Returns a pointer to the first character found <ist> that belongs to the
+ * range [min:max] inclusive, or NULL if none is present. The function is
+ * optimized for strings having no such chars by processing up to sizeof(long)
+ * bytes at once on architectures supporting efficient unaligned accesses.
+ * Despite this it is not very fast (~0.43 byte/cycle) and should mostly be
+ * used on low match probability when it can save a call to a much slower
+ * function. Will not work for characters 0x80 and above. It's optimized for
+ * min and max to be known at build time.
+ */
+static inline const char *ist_find_range(const struct ist ist, unsigned char min, unsigned char max)
+{
+ const union { unsigned long v; } __attribute__((packed)) *u;
+ const char *curr = (void *)ist.ptr - sizeof(long);
+ const char *last = curr + ist.len;
+ unsigned long l1, l2;
+
+ /* easier with an exclusive boundary */
+ max++;
+
+ do {
+ curr += sizeof(long);
+ if (curr > last)
+ break;
+ u = (void *)curr;
+		/* add 0x<min><min><min><min>..<min> to the value, then subtract
+		 * 0x<max><max><max><max>..<max>, to generate a carry in the lower
+		 * byte if the byte contains a lower value.
+ * If we generate a bit 7 that was not there, it means the byte
+ * was min..max.
+ */
+ l2 = u->v;
+ l1 = ~l2 & ((~0UL / 255) * 0x80); /* 0x808080...80 */
+ l2 += (~0UL / 255) * min; /* 0x<min><min>..<min> */
+ l2 -= (~0UL / 255) * max; /* 0x<max><max>..<max> */
+ } while ((l1 & l2) == 0);
+
+ last += sizeof(long);
+ if (__builtin_expect(curr < last, 0)) {
+ do {
+ if ((unsigned char)(*curr - min) < (unsigned char)(max - min))
+ return curr;
+ curr++;
+ } while (curr < last);
+ }
+ return NULL;
+}
+
+/* looks for first occurrence of character <chr> in string <ist> and returns
+ * the tail of the string starting with this character, or (ist.end,0) if not
+ * found.
+ */
+static inline struct ist istfind(const struct ist ist, char chr)
+{
+ struct ist ret = ist;
+
+ while (ret.len--) {
+ if (*ret.ptr++ == chr)
+ return ist2(ret.ptr - 1, ret.len + 1);
+ }
+ return ist2(ret.ptr, 0);
+}
+
+/* looks for first occurrence of character different from <chr> in string <ist>
+ * and returns the tail of the string starting at this character, or (ist.end,0)
+ * if not found.
+ */
+static inline struct ist istskip(const struct ist ist, char chr)
+{
+ struct ist ret = ist;
+
+ while (ret.len--) {
+ if (*ret.ptr++ != chr)
+ return ist2(ret.ptr - 1, ret.len + 1);
+ }
+ return ist2(ret.ptr, 0);
+}
+
+/* looks for first occurrence of string <pat> in string <ist> and returns the
+ * tail of the string starting at this position, or (NULL,0) if not found. The
+ * empty pattern is found everywhere.
+ */
+static inline struct ist istist(const struct ist ist, const struct ist pat)
+{
+ struct ist ret = ist;
+ size_t pos;
+
+ if (!pat.len)
+ return ret;
+
+ while (1) {
+ loop:
+ ret = istfind(ret, *pat.ptr);
+ if (ret.len < pat.len)
+ break;
+
+ /* ret.len >= 1, pat.len >= 1 and *ret.ptr == *pat.ptr */
+
+ ret = istnext(ret);
+ for (pos = 0; pos < pat.len - 1; ) {
+ ++pos;
+ if (ret.ptr[pos - 1] != pat.ptr[pos])
+ goto loop;
+ }
+ return ist2(ret.ptr - 1, ret.len + 1);
+ }
+ return IST_NULL;
+}
+
+/*
+ * looks for the first occurrence of <chr> in string <ist> and returns the
+ * prefix of <ist> preceding it (hence a shorter ist), or the whole <ist>
+ * unchanged if the character is not found.
+ */
+static inline struct ist iststop(const struct ist ist, char chr)
+{
+ size_t len = 0;
+
+ while (len++ < ist.len && ist.ptr[len - 1] != chr)
+ ;
+ return ist2(ist.ptr, len - 1);
+}
+
+/*
+ * advance <.ptr> by <nb> characters.
+ * If <ist> is too short, (ist.end,0) is returned.
+ */
+static inline struct ist istadv(const struct ist ist, const size_t nb)
+{
+ if (ist.len < nb)
+ return ist2(ist.ptr + ist.len, 0);
+ return ist2(ist.ptr + nb, ist.len - nb);
+}
+
+/* Splits the given <ist> at the given character. The returned ist is
+ * equivalent to iststop(ist, delim). The passed <ist> will contain the
+ * remainder of the string, not including the delimiter. In other words
+ * it will be advanced by the length of the returned string plus 1.
+ */
+static inline struct ist istsplit(struct ist *ist, char delim)
+{
+ const struct ist result = iststop(*ist, delim);
+
+ *ist = istadv(*ist, result.len + 1);
+
+ return result;
+}
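+
+/* Usage sketch (illustrative only, not used by this file): tokenizing a
+ * request line with istsplit(). The buffer content and the local names are
+ * purely hypothetical.
+ */
+static inline void istsplit_example(void)
+{
+ char buf[] = "GET /index.html HTTP/1.1";
+ struct ist line = ist2(buf, sizeof(buf) - 1);
+ struct ist method = istsplit(&line, ' '); /* "GET" */
+ struct ist uri = istsplit(&line, ' '); /* "/index.html" */
+ struct ist vsn = istsplit(&line, ' '); /* "HTTP/1.1", <line> is now empty */
+
+ (void)method; (void)uri; (void)vsn;
+}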
+
+/*
+ * compare 2 ists and return non-zero if they are strictly identical, i.e.
+ * they have the same pointer and the same length (this is not a content
+ * comparison)
+ */
+static inline int istissame(const struct ist ist1, const struct ist ist2)
+{
+ return ((ist1.ptr == ist2.ptr) && (ist1.len == ist2.len));
+}
+
+#ifndef IST_FREESTANDING
+/* This function allocates <size> bytes and returns an `ist` pointing to
+ * the allocated area with size `0`.
+ *
+ * If this function fails to allocate memory the return value is equivalent
+ * to IST_NULL.
+ */
+static inline struct ist istalloc(const size_t size)
+{
+ /* Note: do not use ist2 here, as it triggers a gcc11 warning.
+ * '<unknown>' may be used uninitialized [-Werror=maybe-uninitialized]
+ *
+ * This warning is reported because the uninitialized memory block
+ * allocated by malloc should not be passed to a const argument as in
+ * ist2.
+ * See https://gcc.gnu.org/onlinedocs/gcc-11.1.0/gcc/Warning-Options.html#index-Wmaybe-uninitialized
+ */
+ return (struct ist){ .ptr = malloc(size), .len = 0 };
+}
+
+/* This function performs the equivalent of free() on the given <ist>.
+ *
+ * After this function returns the value of the given <ist> will be
+ * modified to be equivalent to IST_NULL.
+ */
+static inline void istfree(struct ist *ist)
+{
+ free(ist->ptr);
+ *ist = IST_NULL;
+}
+
+/* This function performs the equivalent of strdup() on the given <src>.
+ *
+ * If this function fails to allocate memory the return value is equivalent
+ * to IST_NULL.
+ */
+static inline struct ist istdup(const struct ist src)
+{
+ const size_t src_size = src.len;
+
+ /* Allocate at least 1 byte to allow duplicating an empty string with
+ * malloc implementations that return NULL for a 0-size allocation.
+ */
+ struct ist dst = istalloc(src_size ? src_size : 1);
+
+ if (isttest(dst)) {
+ istcpy(&dst, src, src_size);
+ }
+
+ return dst;
+}
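+
+/* Usage sketch (illustrative only): duplicating an ist on the heap and
+ * releasing it. The function name is purely hypothetical.
+ */
+static inline int istdup_example(const struct ist src)
+{
+ struct ist copy = istdup(src);
+
+ if (!isttest(copy))
+ return 0; /* allocation failed */
+ /* ... use <copy> ... */
+ istfree(&copy); /* <copy> is now IST_NULL */
+ return 1;
+}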
+#endif
+
+#endif
diff --git a/include/import/lru.h b/include/import/lru.h
new file mode 100644
index 0000000..d674e53
--- /dev/null
+++ b/include/import/lru.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2015 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <import/eb64tree.h>
+
+/* The LRU supports a global cache shared between multiple domains and multiple
+ * versions of their datasets. The purpose is not to have to flush the whole
+ * LRU once a key is updated and not valid anymore (eg: ACL files), as well as
+ * to reliably support concurrent accesses and handle conflicts gracefully. For
+ * each key a pointer to a dataset and its internal data revision are stored.
+ * All lookups verify that these elements match those passed by the caller and
+ * only return a valid entry upon matching. Otherwise the entry is either
+ * allocated or recycled and considered new. New entries are always initialized
+ * with a NULL domain pointer which is used by the caller to detect that the
+ * entry is new and must be populated. Such entries never expire and are
+ * protected from the risk of being recycled. It's then the caller's
+ * responsibility to perform the operation and commit the entry with its latest
+ * result. This domain thus serves as a lock to protect the entry during all
+ * the computation needed to update it. In a simple use case where the cache is
+ * dedicated, it is recommended to pass the LRU head as the domain pointer and
+ * for example zero as the revision. The most common use case for the caller
+ * consists in simply checking that the returned entry is not null and that its
+ * domain is not null, then using the result. The get() function returns null
+ * if it cannot allocate a node (out of memory, or key currently being updated).
+ */
+struct lru64_list {
+ struct lru64_list *n;
+ struct lru64_list *p;
+};
+
+struct lru64_head {
+ struct lru64_list list;
+ struct eb_root keys;
+ struct lru64 *spare;
+ int cache_size;
+ int cache_usage;
+};
+
+struct lru64 {
+ struct eb64_node node; /* indexing key, typically a hash64 */
+ struct lru64_list lru; /* LRU list */
+ void *domain; /* who this data belongs to */
+ unsigned long long revision; /* data revision (to avoid use-after-free) */
+ void *data; /* returned value, user decides how to use this */
+ void (*free)(void *data); /* function to release data, if needed */
+};
+
+
+struct lru64 *lru64_lookup(unsigned long long key, struct lru64_head *lru, void *domain, unsigned long long revision);
+struct lru64 *lru64_get(unsigned long long key, struct lru64_head *lru, void *domain, unsigned long long revision);
+void lru64_commit(struct lru64 *elem, void *data, void *domain, unsigned long long revision, void (*free)(void *));
+struct lru64_head *lru64_new(int size);
+int lru64_destroy(struct lru64_head *lru);
+void lru64_kill_oldest(struct lru64_head *lru, unsigned long int nb);
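+
+/* Usage sketch (illustrative only): a dedicated cache where the LRU head
+ * itself serves as the domain and the revision is 0, as suggested above.
+ * <compute> and <release> are hypothetical caller-provided callbacks, not
+ * part of this API.
+ */
+static inline void *lru64_example_get(struct lru64_head *cache,
+ unsigned long long hash,
+ void *(*compute)(unsigned long long),
+ void (*release)(void *))
+{
+ struct lru64 *entry = lru64_get(hash, cache, cache, 0);
+
+ if (!entry) /* no node available (or key being updated) */
+ return compute(hash);
+
+ if (!entry->domain) { /* new entry: populate it, then commit */
+ void *result = compute(hash);
+
+ lru64_commit(entry, result, cache, 0, release);
+ return result;
+ }
+ return entry->data; /* valid cached entry */
+}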
diff --git a/include/import/mjson.h b/include/import/mjson.h
new file mode 100644
index 0000000..b96fd3f
--- /dev/null
+++ b/include/import/mjson.h
@@ -0,0 +1,209 @@
+// Copyright (c) 2018-2020 Cesanta Software Limited
+// All rights reserved
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef MJSON_H
+#define MJSON_H
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef MJSON_ENABLE_PRINT
+#define MJSON_ENABLE_PRINT 1
+#endif
+
+#ifndef MJSON_ENABLE_RPC
+#define MJSON_ENABLE_RPC 1
+#endif
+
+#ifndef MJSON_ENABLE_BASE64
+#define MJSON_ENABLE_BASE64 1
+#endif
+
+#ifndef MJSON_ENABLE_MERGE
+#define MJSON_ENABLE_MERGE 0
+#elif MJSON_ENABLE_MERGE
+#define MJSON_ENABLE_NEXT 1
+#endif
+
+#ifndef MJSON_ENABLE_PRETTY
+#define MJSON_ENABLE_PRETTY 0
+#elif MJSON_ENABLE_PRETTY
+#define MJSON_ENABLE_NEXT 1
+#endif
+
+#ifndef MJSON_ENABLE_NEXT
+#define MJSON_ENABLE_NEXT 0
+#endif
+
+#ifndef MJSON_RPC_LIST_NAME
+#define MJSON_RPC_LIST_NAME "rpc.list"
+#endif
+
+#ifndef MJSON_DYNBUF_CHUNK
+#define MJSON_DYNBUF_CHUNK 256 // Allocation granularity for print_dynamic_buf
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ MJSON_ERROR_INVALID_INPUT = -1,
+ MJSON_ERROR_TOO_DEEP = -2,
+};
+
+enum mjson_tok {
+ MJSON_TOK_INVALID = 0,
+ MJSON_TOK_KEY = 1,
+ MJSON_TOK_STRING = 11,
+ MJSON_TOK_NUMBER = 12,
+ MJSON_TOK_TRUE = 13,
+ MJSON_TOK_FALSE = 14,
+ MJSON_TOK_NULL = 15,
+ MJSON_TOK_ARRAY = 91,
+ MJSON_TOK_OBJECT = 123,
+};
+#define MJSON_TOK_IS_VALUE(t) ((t) > 10 && (t) < 20)
+
+typedef int (*mjson_cb_t)(int ev, const char *s, int off, int len, void *ud);
+
+#ifndef MJSON_MAX_DEPTH
+#define MJSON_MAX_DEPTH 20
+#endif
+
+int mjson(const char *s, int len, mjson_cb_t cb, void *ud);
+enum mjson_tok mjson_find(const char *s, int len, const char *jp,
+ const char **tokptr, int *toklen);
+int mjson_get_number(const char *s, int len, const char *path, double *v);
+int mjson_get_bool(const char *s, int len, const char *path, int *v);
+int mjson_get_string(const char *s, int len, const char *path, char *to, int n);
+int mjson_get_hex(const char *s, int len, const char *path, char *to, int n);
+
+#if MJSON_ENABLE_NEXT
+int mjson_next(const char *s, int n, int off, int *koff, int *klen, int *voff,
+ int *vlen, int *vtype);
+#endif
+
+#if MJSON_ENABLE_BASE64
+int mjson_get_base64(const char *s, int len, const char *path, char *to, int n);
+int mjson_base64_dec(const char *src, int n, char *dst, int dlen);
+#endif
+
+#if MJSON_ENABLE_PRINT
+typedef int (*mjson_print_fn_t)(const char *buf, int len, void *userdata);
+typedef int (*mjson_vprint_fn_t)(mjson_print_fn_t, void *, va_list *);
+
+struct mjson_fixedbuf {
+ char *ptr;
+ int size, len;
+};
+
+int mjson_printf(mjson_print_fn_t, void *, const char *fmt, ...);
+int mjson_vprintf(mjson_print_fn_t, void *, const char *fmt, va_list ap);
+int mjson_print_str(mjson_print_fn_t, void *, const char *s, int len);
+int mjson_print_int(mjson_print_fn_t, void *, int value, int is_signed);
+int mjson_print_long(mjson_print_fn_t, void *, long value, int is_signed);
+int mjson_print_buf(mjson_print_fn_t fn, void *, const char *buf, int len);
+
+int mjson_print_null(const char *ptr, int len, void *userdata);
+int mjson_print_fixed_buf(const char *ptr, int len, void *userdata);
+int mjson_print_dynamic_buf(const char *ptr, int len, void *userdata);
+
+#if MJSON_ENABLE_PRETTY
+int mjson_pretty(const char *, int, const char *, mjson_print_fn_t, void *);
+#endif
+
+#if MJSON_ENABLE_MERGE
+int mjson_merge(const char *, int, const char *, int, mjson_print_fn_t, void *);
+#endif
+
+#endif // MJSON_ENABLE_PRINT
+
+#if MJSON_ENABLE_RPC
+
+void jsonrpc_init(mjson_print_fn_t, void *userdata);
+int mjson_globmatch(const char *s1, int n1, const char *s2, int n2);
+
+struct jsonrpc_request {
+ struct jsonrpc_ctx *ctx;
+ const char *frame; // Points to the whole frame
+ int frame_len; // Frame length
+ const char *params; // Points to the "params" in the request frame
+ int params_len; // Length of the "params"
+ const char *id; // Points to the "id" in the request frame
+ int id_len; // Length of the "id"
+ const char *method; // Points to the "method" in the request frame
+ int method_len; // Length of the "method"
+ mjson_print_fn_t fn; // Printer function
+ void *fndata; // Printer function data
+ void *userdata; // Callback's user data as specified at export time
+};
+
+struct jsonrpc_method {
+ const char *method;
+ int method_sz;
+ void (*cb)(struct jsonrpc_request *);
+ struct jsonrpc_method *next;
+};
+
+// Main RPC context, stores current request information and a list of
+// exported RPC methods.
+struct jsonrpc_ctx {
+ struct jsonrpc_method *methods;
+ mjson_print_fn_t response_cb;
+ void *response_cb_data;
+};
+
+// Registers function fn under the given name within the given RPC context
+#define jsonrpc_ctx_export(ctx, name, fn) \
+ do { \
+ static struct jsonrpc_method m = {(name), sizeof(name) - 1, (fn), 0}; \
+ m.next = (ctx)->methods; \
+ (ctx)->methods = &m; \
+ } while (0)
+
+void jsonrpc_ctx_init(struct jsonrpc_ctx *ctx, mjson_print_fn_t, void *);
+void jsonrpc_return_error(struct jsonrpc_request *r, int code,
+ const char *message, const char *data_fmt, ...);
+void jsonrpc_return_success(struct jsonrpc_request *r, const char *result_fmt,
+ ...);
+void jsonrpc_ctx_process(struct jsonrpc_ctx *ctx, const char *req, int req_sz,
+ mjson_print_fn_t fn, void *fndata, void *userdata);
+
+extern struct jsonrpc_ctx jsonrpc_default_context;
+
+#define jsonrpc_export(name, fn) \
+ jsonrpc_ctx_export(&jsonrpc_default_context, (name), (fn))
+
+#define jsonrpc_process(buf, len, fn, fnd, ud) \
+ jsonrpc_ctx_process(&jsonrpc_default_context, (buf), (len), (fn), (fnd), (ud))
+
+#define JSONRPC_ERROR_INVALID -32700 /* Invalid JSON was received */
+#define JSONRPC_ERROR_NOT_FOUND -32601 /* The method does not exist */
+#define JSONRPC_ERROR_BAD_PARAMS -32602 /* Invalid params passed */
+#define JSONRPC_ERROR_INTERNAL -32603 /* Internal JSON-RPC error */
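+
+/* Usage sketch (illustrative only): a hypothetical "sum" method using the RPC
+ * helpers above. The "$[0]"/"$[1]" paths and the "%g" format are assumed to
+ * match upstream mjson's documented behaviour; none of the names below are
+ * part of this header.
+ */
+static inline void jsonrpc_sum_example(struct jsonrpc_request *r)
+{
+ double a = 0, b = 0;
+
+ mjson_get_number(r->params, r->params_len, "$[0]", &a);
+ mjson_get_number(r->params, r->params_len, "$[1]", &b);
+ jsonrpc_return_success(r, "%g", a + b);
+}
+/* registration, e.g. at init time: jsonrpc_export("sum", jsonrpc_sum_example);
+ * then pass each received frame to jsonrpc_process() with the caller's
+ * response printer.
+ */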
+
+#endif // MJSON_ENABLE_RPC
+#ifdef __cplusplus
+}
+#endif
+#endif // MJSON_H
diff --git a/include/import/plock.h b/include/import/plock.h
new file mode 100644
index 0000000..fc001e2
--- /dev/null
+++ b/include/import/plock.h
@@ -0,0 +1,1422 @@
+/* plock - progressive locks
+ *
+ * Copyright (C) 2012-2017 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PL_PLOCK_H
+#define PL_PLOCK_H
+
+#include "atomic-ops.h"
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#include <sched.h>
+#endif
+
+/* 64 bit */
+#define PLOCK64_RL_1 0x0000000000000004ULL
+#define PLOCK64_RL_2PL 0x00000000FFFFFFF8ULL
+#define PLOCK64_RL_ANY 0x00000000FFFFFFFCULL
+#define PLOCK64_SL_1 0x0000000100000000ULL
+#define PLOCK64_SL_ANY 0x0000000300000000ULL
+#define PLOCK64_WL_1 0x0000000400000000ULL
+#define PLOCK64_WL_2PL 0xFFFFFFF800000000ULL
+#define PLOCK64_WL_ANY 0xFFFFFFFC00000000ULL
+
+/* 32 bit */
+#define PLOCK32_RL_1 0x00000004
+#define PLOCK32_RL_2PL 0x0000FFF8
+#define PLOCK32_RL_ANY 0x0000FFFC
+#define PLOCK32_SL_1 0x00010000
+#define PLOCK32_SL_ANY 0x00030000
+#define PLOCK32_WL_1 0x00040000
+#define PLOCK32_WL_2PL 0xFFF80000
+#define PLOCK32_WL_ANY 0xFFFC0000
+
+/* dereferences <*p> as unsigned long without causing aliasing issues */
+#define pl_deref_long(p) ({ volatile unsigned long *__pl_l = (unsigned long *)(p); *__pl_l; })
+
+/* dereferences <*p> as unsigned int without causing aliasing issues */
+#define pl_deref_int(p) ({ volatile unsigned int *__pl_i = (unsigned int *)(p); *__pl_i; })
+
+/* This function waits for <lock> to release all bits covered by <mask>, and
+ * enforces an exponential backoff using CPU pauses to limit the pollution to
+ * the other threads' caches. The progression follows (1.5^N)-1, limited to
+ * 16384 iterations, which is way sufficient even for very large numbers of
+ * threads. It's possible to disable exponential backoff (EBO) for debugging
+ * purposes by setting PLOCK_DISABLE_EBO, in which case the function will be
+ * replaced with a simpler macro. This may for example be useful to more
+ * easily track callers' CPU usage. The macro was not designed to be used
+ * outside of the functions defined here.
+ */
+#if defined(PLOCK_DISABLE_EBO)
+#define pl_wait_unlock_long(lock, mask) \
+ ({ \
+ unsigned long _r; \
+ do { \
+ pl_cpu_relax(); \
+ _r = pl_deref_long(lock); \
+ } while (_r & mask); \
+ _r; /* return value */ \
+ })
+#else /* not PLOCK_DISABLE_EBO */
+__attribute__((unused,always_inline,no_instrument_function)) inline
+static unsigned long __pl_wait_unlock_long(const unsigned long *lock, const unsigned long mask)
+{
+ unsigned long ret;
+ unsigned int m = 0;
+
+ do {
+ unsigned int loops = m;
+
+#ifdef _POSIX_PRIORITY_SCHEDULING
+ if (loops >= 65536) {
+ sched_yield();
+ loops -= 32768;
+ }
+#endif
+ for (; loops >= 60; loops --)
+ pl_cpu_relax();
+
+ for (; loops >= 1; loops--)
+ pl_barrier();
+
+ ret = pl_load(lock);
+ if (__builtin_expect(ret & mask, 0) == 0)
+ break;
+
+ /* the below produces an exponential growth with loops to lower
+ * values and still growing. This allows competing threads to
+ * wait different times once the threshold is reached.
+ */
+ m = ((m + (m >> 1)) + 2) & 0x3ffff;
+ } while (1);
+
+ return ret;
+}
+
+# if defined(PLOCK_INLINE_EBO)
+__attribute__((unused,always_inline,no_instrument_function)) inline
+# else
+__attribute__((unused,noinline,no_instrument_function))
+# endif
+static unsigned long pl_wait_unlock_long(const unsigned long *lock, const unsigned long mask)
+{
+ return __pl_wait_unlock_long(lock, mask);
+}
+#endif /* PLOCK_DISABLE_EBO */
+
+/* This function waits for <lock> to release all bits covered by <mask>, and
+ * enforces an exponential backoff using CPU pauses to limit the pollution to
+ * the other threads' caches. The progression follows (2^N)-1, limited to 255
+ * iterations, which is more than sufficient even for very large numbers of threads.
+ * The function slightly benefits from size optimization under gcc, but Clang
+ * cannot do it, so it's not done here, as it doesn't make a big difference.
+ * It is possible to disable exponential backoff (EBO) for debugging purposes
+ * by setting PLOCK_DISABLE_EBO, in which case the function will be replaced
+ * with a simpler macro. This may for example be useful to more easily track
+ * callers' CPU usage. The macro was not designed to be used outside of the
+ * functions defined here.
+ */
+#if defined(PLOCK_DISABLE_EBO)
+#define pl_wait_unlock_int(lock, mask) \
+ ({ \
+ unsigned int _r; \
+ do { \
+ pl_cpu_relax(); \
+ _r = pl_deref_int(lock); \
+ } while (_r & mask); \
+ _r; /* return value */ \
+ })
+#else
+__attribute__((unused,always_inline,no_instrument_function)) inline
+static unsigned int __pl_wait_unlock_int(const unsigned int *lock, const unsigned int mask)
+{
+ unsigned int ret;
+ unsigned int m = 0;
+
+ do {
+ unsigned int loops = m;
+
+#ifdef _POSIX_PRIORITY_SCHEDULING
+ if (loops >= 65536) {
+ sched_yield();
+ loops -= 32768;
+ }
+#endif
+ for (; loops >= 200; loops -= 10)
+ pl_cpu_relax();
+
+ for (; loops >= 1; loops--)
+ pl_barrier();
+
+ ret = pl_deref_int(lock);
+ if (__builtin_expect(ret & mask, 0) == 0)
+ break;
+
+ /* the below produces an exponential growth with loops to lower
+ * values and still growing. This allows competing threads to
+ * wait different times once the threshold is reached.
+ */
+ m = ((m + (m >> 1)) + 2) & 0x3ffff;
+ } while (1);
+
+ return ret;
+}
+
+# if defined(PLOCK_INLINE_EBO)
+__attribute__((unused,always_inline,no_instrument_function)) inline
+# else
+__attribute__((unused,noinline,no_instrument_function))
+# endif
+static unsigned int pl_wait_unlock_int(const unsigned int *lock, const unsigned int mask)
+{
+ return __pl_wait_unlock_int(lock, mask);
+}
+#endif /* PLOCK_DISABLE_EBO */
+
+/* This function waits for <lock> to change from value <prev> and returns the
+ * new value. It enforces an exponential backoff using CPU pauses to limit the
+ * pollution to the other threads' caches. The progression follows (2^N)-1,
+ * limited to 255 iterations, which is more than sufficient even for very large
+ * numbers of threads. It is designed to be called after a first test which
+ * retrieves the previous value, so it starts by waiting. The function slightly
+ * benefits from size optimization under gcc, but Clang cannot do it, so it's
+ * not done here, as it doesn't make a big difference.
+ */
+__attribute__((unused,noinline,no_instrument_function))
+static unsigned long pl_wait_new_long(const unsigned long *lock, const unsigned long prev)
+{
+ unsigned char m = 0;
+ unsigned long curr;
+
+ do {
+ unsigned char loops = m + 1;
+ m = (m << 1) + 1;
+ do {
+ pl_cpu_relax();
+ } while (__builtin_expect(--loops, 0));
+ curr = pl_deref_long(lock);
+ } while (__builtin_expect(curr == prev, 0));
+ return curr;
+}
+
+/* This function waits for <lock> to change from value <prev> and returns the
+ * new value. It enforces an exponential backoff using CPU pauses to limit the
+ * pollution to the other threads' caches. The progression follows (2^N)-1,
+ * limited to 255 iterations, which is more than sufficient even for very large
+ * numbers of threads. It is designed to be called after a first test which
+ * retrieves the previous value, so it starts by waiting. The function slightly
+ * benefits from size optimization under gcc, but Clang cannot do it, so it's
+ * not done here, as it doesn't make a big difference.
+ */
+__attribute__((unused,noinline,no_instrument_function))
+static unsigned int pl_wait_new_int(const unsigned int *lock, const unsigned int prev)
+{
+ unsigned char m = 0;
+ unsigned int curr;
+
+ do {
+ unsigned char loops = m + 1;
+ m = (m << 1) + 1;
+ do {
+ pl_cpu_relax();
+ } while (__builtin_expect(--loops, 0));
+ curr = pl_deref_int(lock);
+ } while (__builtin_expect(curr == prev, 0));
+ return curr;
+}
+
+/* request shared read access (R), return non-zero on success, otherwise 0 */
+#define pl_try_r(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_WL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_RL_1) & PLOCK64_WL_ANY; \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret((lock), PLOCK64_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_WL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_RL_1) & PLOCK32_WL_ANY; \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret((lock), PLOCK32_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_r__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_r__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request shared read access (R) and wait for it. In order not to disturb a W
+ * lock waiting for all readers to leave, we first check if a W lock is held
+ * before trying to claim the R lock.
+ */
+#define pl_take_r(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __old_r = pl_cmpxchg(__lk_r, 0, __set_r); \
+ if (__old_r) { \
+ while (1) { \
+ if (__old_r & __msk_r) \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ if (!(pl_ldadd_acq(__lk_r, __set_r) & __msk_r)) \
+ break; \
+ __old_r = pl_sub_lax(__lk_r, __set_r); \
+ } \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __old_r = pl_cmpxchg(__lk_r, 0, __set_r); \
+ if (__old_r) { \
+ while (1) { \
+ if (__old_r & __msk_r) \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ if (!(pl_ldadd_acq(__lk_r, __set_r) & __msk_r)) \
+ break; \
+ __old_r = pl_sub_lax(__lk_r, __set_r); \
+ } \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_r__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_r__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* release the read access (R) lock */
+#define pl_drop_r(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_r__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_r__(__FILE__,__LINE__); \
+ }) \
+)
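+
+/* Usage sketch (illustrative only): protecting a read-side critical section
+ * with the R lock. The lock word is any unsigned int or unsigned long
+ * initialized to zero; <shared> stands for a hypothetical protected object.
+ */
+static inline unsigned long pl_example_read(unsigned long *lock, const unsigned long *shared)
+{
+ unsigned long v;
+
+ pl_take_r(lock); /* shared access, concurrent with other readers */
+ v = *shared;
+ pl_drop_r(lock);
+ return v;
+}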
+
+/* request a seek access (S), return non-zero on success, otherwise 0 */
+#define pl_try_s(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_SL_1 | PLOCK64_RL_1) & \
+ (PLOCK64_WL_ANY | PLOCK64_SL_ANY); \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK64_SL_1 | PLOCK64_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_SL_1 | PLOCK32_RL_1) & \
+ (PLOCK32_WL_ANY | PLOCK32_SL_ANY); \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK32_SL_1 | PLOCK32_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_s__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_s__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request a seek access (S) and wait for it. The lock is immediately claimed,
+ * and only upon failure an exponential backoff is used. S locks rarely compete
+ * with W locks so S will generally not disturb W. As the S lock may be used as
+ * a spinlock, it's important to grab it as fast as possible.
+ */
+#define pl_take_s(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_SL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
+ while (1) { \
+ if (!__builtin_expect(pl_ldadd_acq(__lk_r, __set_r) & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_SL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
+ while (1) { \
+ if (!__builtin_expect(pl_ldadd_acq(__lk_r, __set_r) & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_s__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_s__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* release the seek access (S) lock */
+#define pl_drop_s(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_SL_1 + PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_SL_1 + PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_s__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_s__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* drop the S lock and go back to the R lock */
+#define pl_stor(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK64_SL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK32_SL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_stor__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_stor__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* take the W lock under the S lock */
+#define pl_stow(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_ldadd((lock), PLOCK64_WL_1); \
+ if (__pl_r & (PLOCK64_RL_ANY & ~PLOCK64_RL_1)) \
+ __pl_r = pl_wait_unlock_long((const unsigned long*)lock, (PLOCK64_RL_ANY & ~PLOCK64_RL_1)); \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_ldadd((lock), PLOCK32_WL_1); \
+ if (__pl_r & (PLOCK32_RL_ANY & ~PLOCK32_RL_1)) \
+ __pl_r = pl_wait_unlock_int((const unsigned int*)lock, (PLOCK32_RL_ANY & ~PLOCK32_RL_1)); \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_stow__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_stow__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* drop the W lock and go back to the S lock */
+#define pl_wtos(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK64_WL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK32_WL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_wtos__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_wtos__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* drop the W lock and go back to the R lock */
+#define pl_wtor(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK64_WL_1 | PLOCK64_SL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK32_WL_1 | PLOCK32_SL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_wtor__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_wtor__(__FILE__,__LINE__); \
+ }) \
+)
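+
+/* Usage sketch (illustrative only): using the S lock as a light spinlock and
+ * upgrading it to W only when a modification turns out to be required.
+ * <node>, needs_fix() and fix() are hypothetical.
+ */
+static inline void pl_example_check_and_fix(unsigned long *lock, void *node,
+ int (*needs_fix)(void *), void (*fix)(void *))
+{
+ pl_take_s(lock); /* excludes other S/W owners, still allows readers */
+ if (needs_fix(node)) {
+ pl_stow(lock); /* upgrade to W: waits for readers to leave */
+ fix(node);
+ pl_wtos(lock); /* downgrade back to S, readers may re-enter */
+ }
+ pl_drop_s(lock);
+}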
+
+/* request a write access (W), return non-zero on success, otherwise 0.
+ *
+ * Below there is something important: by taking both W and S, we will cause
+ * an overflow of W at 4/5 of the maximum value that can be stored into W due
+ * to the fact that S is 2 bits, so we're effectively adding 5 to the word
+ * composed by W:S. But for all fields whose width is a multiple of 4 bits,
+ * the maximum value is a multiple of 15, thus of 5. So the largest value
+ * that can be stored with all bits set to one is reached exactly by adding 5
+ * repeatedly, and adding 5 once more places value 1 in W and value 0 in S, so
+ * we never leave W at 0. Also, even upon such an overflow, there's no risk of
+ * confusing it with an atomic lock because R is not null, since R will not
+ * have overflowed. For 32-bit locks, this situation happens when exactly
+ * 13108 threads try to grab the lock at once, giving W=1, S=0 and R=13108.
+ * For 64-bit locks, it happens at 858993460 concurrent writers, where W=1,
+ * S=0 and R=858993460.
+#define pl_try_w(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1);\
+ if (__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
+ /* a writer, seeker or atomic is present, let's leave */ \
+ pl_sub_noret_lax((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1);\
+ __pl_r &= (PLOCK64_WL_ANY | PLOCK64_SL_ANY); /* return value */\
+ } else { \
+ /* wait for all other readers to leave */ \
+ while (__pl_r) \
+ __pl_r = pl_deref_long(lock) - \
+ (PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1);\
+ if (__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
+ /* a writer, seeker or atomic is present, let's leave */ \
+ pl_sub_noret_lax((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1);\
+ __pl_r &= (PLOCK32_WL_ANY | PLOCK32_SL_ANY); /* return value */\
+ } else { \
+ /* wait for all other readers to leave */ \
+ while (__pl_r) \
+ __pl_r = pl_deref_int(lock) - \
+ (PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_w__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_w__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
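+
+/* Worked check of the overflow arithmetic described above, for the 32-bit
+ * case (illustrative only): 13108 concurrent callers add
+ * 13108 * (WL_1 + SL_1 + RL_1) = 13108 * 0x50004 = 0x1_0004_CCD0, which
+ * truncated to 32 bits is 0x0004CCD0, i.e. W=1, S=0 and R=13108 as stated.
+ */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+_Static_assert((unsigned int)(13108ULL * (PLOCK32_WL_1 + PLOCK32_SL_1 + PLOCK32_RL_1)) ==
+ PLOCK32_WL_1 + 13108 * PLOCK32_RL_1,
+ "32-bit W:S overflow must leave W=1, S=0, R=13108");
+#endif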
+
+/* request a write access (W) and wait for it. The lock is immediately claimed,
+ * and only upon failure an exponential backoff is used.
+ */
+#define pl_take_w(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
+ register unsigned long __pl_r; \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (!__builtin_expect(__pl_r & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = pl_wait_unlock_long(__lk_r, __msk_r); \
+ } \
+ /* wait for all other readers to leave */ \
+ if (__builtin_expect(__pl_r & PLOCK64_RL_ANY, 0)) \
+ __pl_r = pl_wait_unlock_long(__lk_r, (PLOCK64_RL_ANY & ~PLOCK64_RL_1)) - __set_r; \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
+ register unsigned int __pl_r; \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (!__builtin_expect(__pl_r & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = pl_wait_unlock_int(__lk_r, __msk_r); \
+ } \
+ /* wait for all other readers to leave */ \
+ if (__builtin_expect(__pl_r & PLOCK32_RL_ANY, 0)) \
+ __pl_r = pl_wait_unlock_int(__lk_r, (PLOCK32_RL_ANY & ~PLOCK32_RL_1)) - __set_r; \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_w__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_w__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* drop the write (W) lock entirely */
+#define pl_drop_w(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_w__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_w__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Try to upgrade from R to S, return non-zero on success, otherwise 0.
+ * This lock will fail if S or W are already held. In case of failure to grab
+ * the lock, it MUST NOT be retried without first dropping R, or it may never
+ * complete due to S waiting for R to leave before upgrading to W.
+ */
+#define pl_try_rtos(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r; \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_SL_1) & (PLOCK64_WL_ANY | PLOCK64_SL_ANY);\
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK64_SL_1); \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r; \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_SL_1) & (PLOCK32_WL_ANY | PLOCK32_SL_ANY);\
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK32_SL_1); \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_rtos__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_rtos__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
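+
+/* Usage sketch (illustrative only) of the rule above: on upgrade failure the
+ * R lock is dropped first, then S is taken from scratch, since the current S
+ * owner may be waiting for our R to leave before upgrading to W.
+ */
+static inline void pl_example_upgrade_r_to_s(unsigned long *lock)
+{
+ pl_take_r(lock);
+ /* ... read-only work ... */
+ if (!pl_try_rtos(lock)) {
+ pl_drop_r(lock); /* mandatory before waiting for S again */
+ pl_take_s(lock); /* take S (and its implicit R) directly */
+ }
+ /* ... we now hold the S lock ... */
+ pl_drop_s(lock);
+}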
+
+
+/* Try to upgrade from R to W, return non-zero on success, otherwise 0.
+ * This lock will fail if S or W are already held. In case of failure to grab
+ * the lock, it MUST NOT be retried without first dropping R, or it may never
+ * complete due to S waiting for R to leave before upgrading to W. It waits for
+ * the last readers to leave.
+ */
+#define pl_try_rtow(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_SL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
+ register unsigned long __pl_r; \
+ pl_barrier(); \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ if (pl_ldadd_lax(__lk_r, - __set_r)) \
+ break; /* the caller needs to drop the lock now */ \
+ continue; /* lock was released, try again */ \
+ } \
+ /* ok we're the only writer, wait for readers to leave */ \
+ while (__builtin_expect(__pl_r, 0)) \
+ __pl_r = pl_deref_long(__lk_r) - (PLOCK64_WL_1|PLOCK64_SL_1|PLOCK64_RL_1); \
+ /* now return with __pl_r = 0 */ \
+ break; \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_SL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
+ register unsigned int __pl_r; \
+ pl_barrier(); \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ if (pl_ldadd_lax(__lk_r, - __set_r)) \
+ break; /* the caller needs to drop the lock now */ \
+ continue; /* lock was released, try again */ \
+ } \
+ /* ok we're the only writer, wait for readers to leave */ \
+ while (__builtin_expect(__pl_r, 0)) \
+ __pl_r = pl_deref_int(__lk_r) - (PLOCK32_WL_1|PLOCK32_SL_1|PLOCK32_RL_1); \
+ /* now return with __pl_r = 0 */ \
+ break; \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_rtow__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_rtow__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+
+/* request atomic write access (A), return non-zero on success, otherwise 0.
+ * It's a bit tricky as we only use the W bits for this and want to distinguish
+ * between other atomic users and regular lock users. We have to give up if an
+ * S lock appears. It's possible that such a lock stays hidden in the W bits
+ * after an overflow, but in this case R is still held, ensuring we stay in the
+ * loop until we discover the conflict. The lock only return successfully if all
+ * readers are gone (or converted to A).
+ */
+#define pl_try_a(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_WL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK64_WL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK64_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_long(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_WL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK32_WL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK32_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_int(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_a__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_a__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request atomic write access (A) and wait for it. See comments in pl_try_a() for
+ * explanations.
+ */
+#define pl_take_a(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1; \
+ register unsigned long __msk_r = PLOCK64_SL_ANY; \
+ register unsigned long __pl_r; \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ while (__builtin_expect(__pl_r & PLOCK64_RL_ANY, 0)) { \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ continue; \
+ } \
+ /* wait for all readers to leave or upgrade */ \
+ pl_cpu_relax(); pl_cpu_relax(); pl_cpu_relax(); \
+ __pl_r = pl_deref_long(lock); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1; \
+ register unsigned int __msk_r = PLOCK32_SL_ANY; \
+ register unsigned int __pl_r; \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ while (__builtin_expect(__pl_r & PLOCK32_RL_ANY, 0)) { \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ continue; \
+ } \
+ /* wait for all readers to leave or upgrade */ \
+ pl_cpu_relax(); pl_cpu_relax(); pl_cpu_relax(); \
+ __pl_r = pl_deref_int(lock); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_a__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_a__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* release atomic write access (A) lock */
+#define pl_drop_a(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_WL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_WL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_a__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_a__(__FILE__,__LINE__); \
+ }) \
+)
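+
+/* Usage sketch (illustrative only): several "atomic" writers may hold A at
+ * the same time and perform their own atomic updates, while plain R/S/W
+ * users are excluded. <counter> is a hypothetical shared field.
+ */
+static inline void pl_example_atomic_add(unsigned long *lock, unsigned long *counter)
+{
+ pl_take_a(lock);
+ pl_add_noret(counter, 1); /* atomic op from atomic-ops.h */
+ pl_drop_a(lock);
+}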
+
+/* Downgrade A to R. Inc(R), dec(W) then wait for W==0 */
+#define pl_ator(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_RL_1 - PLOCK64_WL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ while (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ __pl_r = pl_wait_unlock_long(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_RL_1 - PLOCK32_WL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ while (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ __pl_r = pl_wait_unlock_int(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ator__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_ator__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Try to upgrade from R to A, return non-zero on success, otherwise 0.
+ * This lock will fail if S is held or appears while waiting (typically due to
+ * a previous grab that was disguised as a W due to an overflow). In case of
+ * failure to grab the lock, it MUST NOT be retried without first dropping R,
+ * or it may never complete due to S waiting for R to leave before upgrading
+ * to W. The lock succeeds once there's no more R (i.e. all of them have either
+ * completed or were turned to A).
+ */
+#define pl_try_rtoa(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK64_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_long(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK32_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_int(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_rtoa__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_rtoa__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+
+/*
+ * The following operations cover the multiple writers model : U->R->J->C->A
+ */
+
+
+/* Upgrade R to J. Inc(W) then wait for R==W or S != 0 */
+#define pl_rtoj(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_ldadd_acq(__lk_r, PLOCK64_WL_1) + PLOCK64_WL_1;\
+ register unsigned char __m = 0; \
+ while (!(__pl_r & PLOCK64_SL_ANY) && \
+ (__pl_r / PLOCK64_WL_1 != (__pl_r & PLOCK64_RL_ANY) / PLOCK64_RL_1)) { \
+ unsigned char __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_ldadd_acq(__lk_r, PLOCK32_WL_1) + PLOCK32_WL_1;\
+ register unsigned char __m = 0; \
+ while (!(__pl_r & PLOCK32_SL_ANY) && \
+ (__pl_r / PLOCK32_WL_1 != (__pl_r & PLOCK32_RL_ANY) / PLOCK32_RL_1)) { \
+ unsigned char __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_rtoj__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_rtoj__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Upgrade J to C. Set S. Only one thread needs to do it though it's idempotent */
+#define pl_jtoc(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_deref_long(__lk_r); \
+ if (!(__pl_r & PLOCK64_SL_ANY)) \
+ pl_or_noret(__lk_r, PLOCK64_SL_1); \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_deref_int(__lk_r); \
+ if (!(__pl_r & PLOCK32_SL_ANY)) \
+ pl_or_noret(__lk_r, PLOCK32_SL_1); \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_jtoc__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_jtoc__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Upgrade R to C. Inc(W) then wait for R==W or S != 0 */
+#define pl_rtoc(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_ldadd_acq(__lk_r, PLOCK64_WL_1) + PLOCK64_WL_1;\
+ register unsigned char __m = 0; \
+ while (__builtin_expect(!(__pl_r & PLOCK64_SL_ANY), 0)) { \
+ unsigned char __loops; \
+ if (__pl_r / PLOCK64_WL_1 == (__pl_r & PLOCK64_RL_ANY) / PLOCK64_RL_1) { \
+ pl_or_noret(__lk_r, PLOCK64_SL_1); \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_ldadd_acq(__lk_r, PLOCK32_WL_1) + PLOCK32_WL_1;\
+ register unsigned char __m = 0; \
+ while (__builtin_expect(!(__pl_r & PLOCK32_SL_ANY), 0)) { \
+ unsigned char __loops; \
+ if (__pl_r / PLOCK32_WL_1 == (__pl_r & PLOCK32_RL_ANY) / PLOCK32_RL_1) { \
+ pl_or_noret(__lk_r, PLOCK32_SL_1); \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_rtoc__(char *,int);               \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8))   \
+ __unsupported_argument_size_for_pl_rtoc__(__FILE__,__LINE__);              \
+ }) \
+)
+
+/* Drop the claim (C) lock : R--,W-- then clear S if !R */
+#define pl_drop_c(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = - PLOCK64_RL_1 - PLOCK64_WL_1; \
+ register unsigned long __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ if (!(__pl_r & PLOCK64_RL_ANY)) \
+ pl_and_noret(__lk_r, ~PLOCK64_SL_1); \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = - PLOCK32_RL_1 - PLOCK32_WL_1; \
+ register unsigned int __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ if (!(__pl_r & PLOCK32_RL_ANY)) \
+ pl_and_noret(__lk_r, ~PLOCK32_SL_1); \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_c__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_c__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Upgrade C to A. R-- then wait for !S or clear S if !R */
+#define pl_ctoa(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_ldadd(__lk_r, -PLOCK64_RL_1) - PLOCK64_RL_1;\
+ while (__pl_r & PLOCK64_SL_ANY) { \
+ if (!(__pl_r & PLOCK64_RL_ANY)) { \
+ pl_and_noret(__lk_r, ~PLOCK64_SL_1); \
+ break; \
+ } \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_ldadd(__lk_r, -PLOCK32_RL_1) - PLOCK32_RL_1; \
+ while (__pl_r & PLOCK32_SL_ANY) { \
+ if (!(__pl_r & PLOCK32_RL_ANY)) { \
+ pl_and_noret(__lk_r, ~PLOCK32_SL_1); \
+ break; \
+ } \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ctoa__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_ctoa__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* downgrade the atomic write access lock (A) to join (J) */
+#define pl_atoj(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_add_noret(lock, PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_add_noret(lock, PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_atoj__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_atoj__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Returns non-zero if the thread calling it is the last writer, otherwise zero. It is
+ * designed to be called before pl_drop_j(), pl_drop_c() or pl_drop_a() for operations
+ * which need to be called only once.
+ */
+#define pl_last_writer(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ !(pl_deref_long(lock) & PLOCK64_WL_2PL); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ !(pl_deref_int(lock) & PLOCK32_WL_2PL); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_last_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_last_j__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
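+
+/* Usage sketch (illustrative only) of the multiple writers model described
+ * above: a participant enters with R, joins the writers, claims, performs its
+ * share of the modifications, then leaves. pl_last_writer() may be checked
+ * right before pl_drop_c() when some finalization must run only once.
+ * do_update() is hypothetical.
+ */
+static inline void pl_example_join_claim(unsigned long *lock, void (*do_update)(void))
+{
+ pl_take_r(lock); /* U -> R : observe the shared state */
+ pl_rtoj(lock); /* R -> J : join the pending writers */
+ pl_jtoc(lock); /* J -> C : claim (sets S, idempotent) */
+ do_update(); /* modification work done under the claim */
+ pl_drop_c(lock); /* C -> U */
+}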
+
+/* attempt to get an exclusive write access via the J lock and wait for it.
+ * Only one thread may succeed in this operation. It will not conflict with
+ * other users and will first wait for all writers to leave, then for all
+ * readers to leave before starting. This offers a solution to obtain an
+ * exclusive access to a shared resource in the R/J/C/A model. A concurrent
+ * take_a() will wait for this one to finish first. Using a CAS instead of XADD
+ * should make the operation converge slightly faster. Returns non-zero on
+ * success otherwise 0.
+ */
+#define pl_try_j(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __pl_r; \
+ register unsigned char __m; \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK64_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* give up on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK64_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = 0; /* failed to get the lock */ \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ __pl_r; /* return value, cannot be null on success */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __pl_r; \
+ register unsigned char __m; \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK32_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* but rollback on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK32_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = 0; /* failed to get the lock */ \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ __pl_r; /* return value, cannot be null on success */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_j__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
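+
+/* Editorial usage sketch (not part of the upstream header): non-blocking
+ * exclusive access with pl_try_j(), assuming a shared lock word "lck" and a
+ * hypothetical do_exclusive_work() function provided by the caller.
+ *
+ *    static unsigned long lck;
+ *
+ *    void try_exclusive_update(void)
+ *    {
+ *            if (pl_try_j(&lck)) {
+ *                    do_exclusive_work();    // J lock held here
+ *                    pl_drop_j(&lck);        // releases the W+R bits taken above
+ *            }
+ *            // otherwise give up or retry later
+ *    }
+ */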
+
+/* request an exclusive write access via the J lock and wait for it. Only one
+ * thread may succeed in this operation. It will not conflict with other users
+ * and will first wait for all writers to leave, then for all readers to leave
+ * before starting. This offers a solution to obtain an exclusive access to a
+ * shared resource in the R/J/C/A model. A concurrent take_a() will wait for
+ * this one to finish first. Using a CAS instead of XADD should make the
+ * operation converge slightly faster.
+ */
+#define pl_take_j(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ __label__ __retry; \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __pl_r; \
+ register unsigned char __m; \
+ __retry: \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK64_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* but rollback on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK64_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ goto __retry; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ __label__ __retry; \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __pl_r; \
+ register unsigned char __m; \
+ __retry: \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK32_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* but rollback on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK32_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ goto __retry; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_j__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* drop the join (J) lock entirely */
+#define pl_drop_j(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_WL_1 | PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_WL_1 | PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_j__(__FILE__,__LINE__); \
+ }) \
+)
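+
+/* Editorial usage sketch (not part of the upstream header): blocking variant
+ * of the previous example using pl_take_j()/pl_drop_j(); pl_last_writer() may
+ * be checked just before the drop for once-only cleanup as documented above.
+ * The helper functions are hypothetical.
+ *
+ *    static unsigned long lck;
+ *
+ *    void exclusive_update(void)
+ *    {
+ *            pl_take_j(&lck);              // waits for writers, then readers
+ *            do_exclusive_work();
+ *            if (pl_last_writer(&lck))
+ *                    do_final_cleanup();   // once-only action
+ *            pl_drop_j(&lck);
+ *    }
+ */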
+
+/*
+ * The part below is for Low Overhead R/W locks (LORW). These ones are not
+ * upgradable and not necessarily fair but they try to be fast when uncontended
+ * and to limit the cost and perturbation during contention. Writers always
+ * have precedence over readers to preserve latency as much as possible.
+ *
+ * The principle is to offer a fast no-contention path and a limited total
+ * number of writes for the contended path. Since R/W locks are expected to be
+ * used in situations where there is a benefit in separating reads from writes,
+ * it is expected that reads are common (typ >= 50%) and that there is often at
+ * least one reader (otherwise a spinlock wouldn't be a problem). As such, a
+ * reader will try to pass instantly and, upon detecting contention, will
+ * immediately retract and wait in the queue. A writer will first also
+ * try to pass instantly, and if it fails due to pending readers, it will mark
+ * that it's waiting so that readers stop entering. This will leave the writer
+ * waiting as close as possible to the point of being granted access. New
+ * writers will also notice this previous contention and will wait outside.
+ * This means that a successful access for a reader or a writer requires a
+ * single CAS, and a contended attempt will require one failed CAS and one
+ * successful XADD for a reader, or an optional OR and a N+1 CAS for the
+ * writer.
+ *
+ * A counter of shared users indicates the number of active readers, while a
+ * (single-bit) counter of exclusive writers indicates whether the lock is
+ * currently held for writes. This distinction also permits using a single
+ * function to release the lock if desired, since the exclusive bit indicates
+ * the state of the caller of unlock(). The WRQ bit is cleared during the
+ * unlock.
+ *
+ * Layout: (32/64 bit):
+ * 31 2 1 0
+ * +-----------+--------------+-----+-----+
+ * | | SHR | WRQ | EXC |
+ * +-----------+--------------+-----+-----+
+ *
+ * In order to minimize operations, the WRQ bit is held during EXC so that the
+ * write waiter that had to fight for EXC doesn't have to release WRQ during
+ * its operations, and will just drop it along with EXC upon unlock.
+ *
+ * This means the following costs:
+ * reader:
+ * success: 1 CAS
+ * failure: 1 CAS + 1 XADD
+ * unlock: 1 SUB
+ * writer:
+ * success: 1 RD + 1 CAS
+ * failure: 1 RD + 1 CAS + 0/1 OR + N CAS
+ * unlock: 1 AND
+ */
+
+#define PLOCK_LORW_EXC_BIT ((sizeof(long) == 8) ? 0 : 0)
+#define PLOCK_LORW_EXC_SIZE ((sizeof(long) == 8) ? 1 : 1)
+#define PLOCK_LORW_EXC_BASE (1UL << PLOCK_LORW_EXC_BIT)
+#define PLOCK_LORW_EXC_MASK (((1UL << PLOCK_LORW_EXC_SIZE) - 1UL) << PLOCK_LORW_EXC_BIT)
+
+#define PLOCK_LORW_WRQ_BIT ((sizeof(long) == 8) ? 1 : 1)
+#define PLOCK_LORW_WRQ_SIZE ((sizeof(long) == 8) ? 1 : 1)
+#define PLOCK_LORW_WRQ_BASE (1UL << PLOCK_LORW_WRQ_BIT)
+#define PLOCK_LORW_WRQ_MASK (((1UL << PLOCK_LORW_WRQ_SIZE) - 1UL) << PLOCK_LORW_WRQ_BIT)
+
+#define PLOCK_LORW_SHR_BIT ((sizeof(long) == 8) ? 2 : 2)
+#define PLOCK_LORW_SHR_SIZE ((sizeof(long) == 8) ? 30 : 30)
+#define PLOCK_LORW_SHR_BASE (1UL << PLOCK_LORW_SHR_BIT)
+#define PLOCK_LORW_SHR_MASK (((1UL << PLOCK_LORW_SHR_SIZE) - 1UL) << PLOCK_LORW_SHR_BIT)
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_rdlock(unsigned long *lock)
+{
+ unsigned long lk = 0;
+
+ /* First, assume we're alone and try to get the read lock (fast path).
+ * It often works because read locks are often used on low-contention
+ * structs.
+ */
+ lk = pl_cmpxchg(lock, 0, PLOCK_LORW_SHR_BASE);
+ if (!lk)
+ return;
+
+ /* so we were not alone, make sure there's no writer waiting for the
+ * lock to be empty of visitors.
+ */
+ if (lk & PLOCK_LORW_WRQ_MASK)
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#endif
+
+ /* count us as visitor among others */
+ lk = pl_ldadd_acq(lock, PLOCK_LORW_SHR_BASE);
+
+ /* wait for end of exclusive access if any */
+ if (lk & PLOCK_LORW_EXC_MASK)
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#endif
+}
+
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_wrlock(unsigned long *lock)
+{
+ unsigned long lk = 0;
+ unsigned long old = 0;
+
+ /* first, make sure another writer is not already blocked waiting for
+ * readers to leave. Note that tests have shown that it can be even
+ * faster to avoid the first check and to unconditionally wait.
+ */
+ lk = pl_deref_long(lock);
+ if (__builtin_expect(lk & PLOCK_LORW_WRQ_MASK, 1))
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#endif
+
+ do {
+ /* let's check for the two sources of contention at once */
+
+ if (__builtin_expect(lk & (PLOCK_LORW_SHR_MASK | PLOCK_LORW_EXC_MASK), 1)) {
+ /* check if there are still readers coming. If so, close the door and
+ * wait for them to leave.
+ */
+ if (lk & PLOCK_LORW_SHR_MASK) {
+ /* note below, an OR is significantly cheaper than BTS or XADD */
+ if (!(lk & PLOCK_LORW_WRQ_MASK))
+ pl_or_noret(lock, PLOCK_LORW_WRQ_BASE);
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_SHR_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_SHR_MASK);
+#endif
+ }
+
+ /* And also wait for a previous writer to finish. */
+ if (lk & PLOCK_LORW_EXC_MASK)
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#endif
+ }
+
+ /* A fresh new reader may appear right now if there were none
+ * above and we didn't close the door.
+ */
+ old = lk & ~PLOCK_LORW_SHR_MASK & ~PLOCK_LORW_EXC_MASK;
+ lk = pl_cmpxchg(lock, old, old | PLOCK_LORW_EXC_BASE);
+ } while (lk != old);
+
+ /* done, not waiting anymore, the WRQ bit if any, will be dropped by the
+ * unlock
+ */
+}
+
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_rdunlock(unsigned long *lock)
+{
+ pl_sub_noret_rel(lock, PLOCK_LORW_SHR_BASE);
+}
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_wrunlock(unsigned long *lock)
+{
+ pl_and_noret_rel(lock, ~(PLOCK_LORW_WRQ_MASK | PLOCK_LORW_EXC_MASK));
+}
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_unlock(unsigned long *lock)
+{
+ if (pl_deref_long(lock) & PLOCK_LORW_EXC_MASK)
+ pl_lorw_wrunlock(lock);
+ else
+ pl_lorw_rdunlock(lock);
+}
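+
+/* Editorial usage sketch (not part of the upstream header): protecting a
+ * hypothetical shared counter with the LORW primitives above. The lock word
+ * is a plain unsigned long initialized to zero.
+ *
+ *    static unsigned long rwlck;    // 0 = unlocked
+ *    static unsigned long counter;
+ *
+ *    unsigned long read_counter(void)
+ *    {
+ *            unsigned long v;
+ *
+ *            pl_lorw_rdlock(&rwlck);      // shared access, writers have precedence
+ *            v = counter;
+ *            pl_lorw_rdunlock(&rwlck);
+ *            return v;
+ *    }
+ *
+ *    void bump_counter(void)
+ *    {
+ *            pl_lorw_wrlock(&rwlck);      // exclusive access
+ *            counter++;
+ *            pl_lorw_wrunlock(&rwlck);    // or pl_lorw_unlock() for either side
+ *    }
+ */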
+
+#endif /* PL_PLOCK_H */
diff --git a/include/import/sha1.h b/include/import/sha1.h
new file mode 100644
index 0000000..33ee530
--- /dev/null
+++ b/include/import/sha1.h
@@ -0,0 +1,35 @@
+/*
+ * Based on the git SHA1 Implementation.
+ *
+ * Copyright (C) 2009-2015, Linus Torvalds and others.
+ *
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was initially based on the Mozilla SHA1 implementation, although
+ * none of the original Mozilla code remains.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+typedef struct {
+ unsigned long long size;
+ unsigned int H[5];
+ unsigned int W[16];
+} blk_SHA_CTX;
+
+void blk_SHA1_Init(blk_SHA_CTX *ctx);
+void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, unsigned long len);
+void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx);
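+
+/* Editorial usage sketch (not part of the upstream header): hashing a buffer
+ * with the three-call sequence declared above.
+ *
+ *    blk_SHA_CTX ctx;
+ *    unsigned char digest[20];
+ *
+ *    blk_SHA1_Init(&ctx);
+ *    blk_SHA1_Update(&ctx, buf, len);   // may be called repeatedly per chunk
+ *    blk_SHA1_Final(digest, &ctx);      // digest now holds the 20-byte SHA1
+ */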
diff --git a/include/import/slz-tables.h b/include/import/slz-tables.h
new file mode 100644
index 0000000..0b3a5b9
--- /dev/null
+++ b/include/import/slz-tables.h
@@ -0,0 +1,257 @@
+/* Fixed Huffman table as per RFC1951.
+ *
+ * Lit Value Bits Codes
+ * --------- ---- -----
+ * 0 - 143 8 00110000 through 10111111
+ * 144 - 255 9 110010000 through 111111111
+ * 256 - 279 7 0000000 through 0010111
+ * 280 - 287 8 11000000 through 11000111
+ *
+ * The codes are encoded in reverse, the high bit of the code appears encoded
+ * as bit 0. The table is built by mkhuff.sh. The 16 bits are encoded this way:
+ * - bits 0..3 : bits
+ * - bits 4..12 : code
+ */
+static const uint16_t fixed_huff[288] = {
+ 0x00c8, 0x08c8, 0x04c8, 0x0cc8, 0x02c8, 0x0ac8, 0x06c8, 0x0ec8, // 0
+ 0x01c8, 0x09c8, 0x05c8, 0x0dc8, 0x03c8, 0x0bc8, 0x07c8, 0x0fc8, // 8
+ 0x0028, 0x0828, 0x0428, 0x0c28, 0x0228, 0x0a28, 0x0628, 0x0e28, // 16
+ 0x0128, 0x0928, 0x0528, 0x0d28, 0x0328, 0x0b28, 0x0728, 0x0f28, // 24
+ 0x00a8, 0x08a8, 0x04a8, 0x0ca8, 0x02a8, 0x0aa8, 0x06a8, 0x0ea8, // 32
+ 0x01a8, 0x09a8, 0x05a8, 0x0da8, 0x03a8, 0x0ba8, 0x07a8, 0x0fa8, // 40
+ 0x0068, 0x0868, 0x0468, 0x0c68, 0x0268, 0x0a68, 0x0668, 0x0e68, // 48
+ 0x0168, 0x0968, 0x0568, 0x0d68, 0x0368, 0x0b68, 0x0768, 0x0f68, // 56
+ 0x00e8, 0x08e8, 0x04e8, 0x0ce8, 0x02e8, 0x0ae8, 0x06e8, 0x0ee8, // 64
+ 0x01e8, 0x09e8, 0x05e8, 0x0de8, 0x03e8, 0x0be8, 0x07e8, 0x0fe8, // 72
+ 0x0018, 0x0818, 0x0418, 0x0c18, 0x0218, 0x0a18, 0x0618, 0x0e18, // 80
+ 0x0118, 0x0918, 0x0518, 0x0d18, 0x0318, 0x0b18, 0x0718, 0x0f18, // 88
+ 0x0098, 0x0898, 0x0498, 0x0c98, 0x0298, 0x0a98, 0x0698, 0x0e98, // 96
+ 0x0198, 0x0998, 0x0598, 0x0d98, 0x0398, 0x0b98, 0x0798, 0x0f98, // 104
+ 0x0058, 0x0858, 0x0458, 0x0c58, 0x0258, 0x0a58, 0x0658, 0x0e58, // 112
+ 0x0158, 0x0958, 0x0558, 0x0d58, 0x0358, 0x0b58, 0x0758, 0x0f58, // 120
+ 0x00d8, 0x08d8, 0x04d8, 0x0cd8, 0x02d8, 0x0ad8, 0x06d8, 0x0ed8, // 128
+ 0x01d8, 0x09d8, 0x05d8, 0x0dd8, 0x03d8, 0x0bd8, 0x07d8, 0x0fd8, // 136
+ 0x0139, 0x1139, 0x0939, 0x1939, 0x0539, 0x1539, 0x0d39, 0x1d39, // 144
+ 0x0339, 0x1339, 0x0b39, 0x1b39, 0x0739, 0x1739, 0x0f39, 0x1f39, // 152
+ 0x00b9, 0x10b9, 0x08b9, 0x18b9, 0x04b9, 0x14b9, 0x0cb9, 0x1cb9, // 160
+ 0x02b9, 0x12b9, 0x0ab9, 0x1ab9, 0x06b9, 0x16b9, 0x0eb9, 0x1eb9, // 168
+ 0x01b9, 0x11b9, 0x09b9, 0x19b9, 0x05b9, 0x15b9, 0x0db9, 0x1db9, // 176
+ 0x03b9, 0x13b9, 0x0bb9, 0x1bb9, 0x07b9, 0x17b9, 0x0fb9, 0x1fb9, // 184
+ 0x0079, 0x1079, 0x0879, 0x1879, 0x0479, 0x1479, 0x0c79, 0x1c79, // 192
+ 0x0279, 0x1279, 0x0a79, 0x1a79, 0x0679, 0x1679, 0x0e79, 0x1e79, // 200
+ 0x0179, 0x1179, 0x0979, 0x1979, 0x0579, 0x1579, 0x0d79, 0x1d79, // 208
+ 0x0379, 0x1379, 0x0b79, 0x1b79, 0x0779, 0x1779, 0x0f79, 0x1f79, // 216
+ 0x00f9, 0x10f9, 0x08f9, 0x18f9, 0x04f9, 0x14f9, 0x0cf9, 0x1cf9, // 224
+ 0x02f9, 0x12f9, 0x0af9, 0x1af9, 0x06f9, 0x16f9, 0x0ef9, 0x1ef9, // 232
+ 0x01f9, 0x11f9, 0x09f9, 0x19f9, 0x05f9, 0x15f9, 0x0df9, 0x1df9, // 240
+ 0x03f9, 0x13f9, 0x0bf9, 0x1bf9, 0x07f9, 0x17f9, 0x0ff9, 0x1ff9, // 248
+ 0x0007, 0x0407, 0x0207, 0x0607, 0x0107, 0x0507, 0x0307, 0x0707, // 256
+ 0x0087, 0x0487, 0x0287, 0x0687, 0x0187, 0x0587, 0x0387, 0x0787, // 264
+ 0x0047, 0x0447, 0x0247, 0x0647, 0x0147, 0x0547, 0x0347, 0x0747, // 272
+ 0x0038, 0x0838, 0x0438, 0x0c38, 0x0238, 0x0a38, 0x0638, 0x0e38 // 280
+};
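+
+/* Editorial usage sketch (not part of the upstream header): extracting the
+ * code and its length from a fixed_huff[] entry, following the layout
+ * described above (bits 0..3 = number of bits, bits 4..12 = reversed code).
+ *
+ *    uint16_t e    = fixed_huff[sym];   // sym in 0..287
+ *    uint32_t nbit = e & 15;            // number of bits to emit
+ *    uint32_t code = e >> 4;            // LSB-first (pre-reversed) code
+ */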
+
+/* length from 3 to 258 converted to bit strings for use with fixed huffman
+ * coding. It was built by tools/dump_len.c. The format is the following:
+ * - bits 0..15 = code
+ * - bits 16..19 = #bits
+ */
+static const uint32_t len_fh[259] = {
+ 0x000000, 0x000000, 0x000000, 0x070040, /* 0-3 */
+ 0x070020, 0x070060, 0x070010, 0x070050, /* 4-7 */
+ 0x070030, 0x070070, 0x070008, 0x080048, /* 8-11 */
+ 0x0800c8, 0x080028, 0x0800a8, 0x080068, /* 12-15 */
+ 0x0800e8, 0x080018, 0x080098, 0x090058, /* 16-19 */
+ 0x0900d8, 0x090158, 0x0901d8, 0x090038, /* 20-23 */
+ 0x0900b8, 0x090138, 0x0901b8, 0x090078, /* 24-27 */
+ 0x0900f8, 0x090178, 0x0901f8, 0x090004, /* 28-31 */
+ 0x090084, 0x090104, 0x090184, 0x0a0044, /* 32-35 */
+ 0x0a00c4, 0x0a0144, 0x0a01c4, 0x0a0244, /* 36-39 */
+ 0x0a02c4, 0x0a0344, 0x0a03c4, 0x0a0024, /* 40-43 */
+ 0x0a00a4, 0x0a0124, 0x0a01a4, 0x0a0224, /* 44-47 */
+ 0x0a02a4, 0x0a0324, 0x0a03a4, 0x0a0064, /* 48-51 */
+ 0x0a00e4, 0x0a0164, 0x0a01e4, 0x0a0264, /* 52-55 */
+ 0x0a02e4, 0x0a0364, 0x0a03e4, 0x0a0014, /* 56-59 */
+ 0x0a0094, 0x0a0114, 0x0a0194, 0x0a0214, /* 60-63 */
+ 0x0a0294, 0x0a0314, 0x0a0394, 0x0b0054, /* 64-67 */
+ 0x0b00d4, 0x0b0154, 0x0b01d4, 0x0b0254, /* 68-71 */
+ 0x0b02d4, 0x0b0354, 0x0b03d4, 0x0b0454, /* 72-75 */
+ 0x0b04d4, 0x0b0554, 0x0b05d4, 0x0b0654, /* 76-79 */
+ 0x0b06d4, 0x0b0754, 0x0b07d4, 0x0b0034, /* 80-83 */
+ 0x0b00b4, 0x0b0134, 0x0b01b4, 0x0b0234, /* 84-87 */
+ 0x0b02b4, 0x0b0334, 0x0b03b4, 0x0b0434, /* 88-91 */
+ 0x0b04b4, 0x0b0534, 0x0b05b4, 0x0b0634, /* 92-95 */
+ 0x0b06b4, 0x0b0734, 0x0b07b4, 0x0b0074, /* 96-99 */
+ 0x0b00f4, 0x0b0174, 0x0b01f4, 0x0b0274, /* 100-103 */
+ 0x0b02f4, 0x0b0374, 0x0b03f4, 0x0b0474, /* 104-107 */
+ 0x0b04f4, 0x0b0574, 0x0b05f4, 0x0b0674, /* 108-111 */
+ 0x0b06f4, 0x0b0774, 0x0b07f4, 0x0c0003, /* 112-115 */
+ 0x0c0103, 0x0c0203, 0x0c0303, 0x0c0403, /* 116-119 */
+ 0x0c0503, 0x0c0603, 0x0c0703, 0x0c0803, /* 120-123 */
+ 0x0c0903, 0x0c0a03, 0x0c0b03, 0x0c0c03, /* 124-127 */
+ 0x0c0d03, 0x0c0e03, 0x0c0f03, 0x0d0083, /* 128-131 */
+ 0x0d0183, 0x0d0283, 0x0d0383, 0x0d0483, /* 132-135 */
+ 0x0d0583, 0x0d0683, 0x0d0783, 0x0d0883, /* 136-139 */
+ 0x0d0983, 0x0d0a83, 0x0d0b83, 0x0d0c83, /* 140-143 */
+ 0x0d0d83, 0x0d0e83, 0x0d0f83, 0x0d1083, /* 144-147 */
+ 0x0d1183, 0x0d1283, 0x0d1383, 0x0d1483, /* 148-151 */
+ 0x0d1583, 0x0d1683, 0x0d1783, 0x0d1883, /* 152-155 */
+ 0x0d1983, 0x0d1a83, 0x0d1b83, 0x0d1c83, /* 156-159 */
+ 0x0d1d83, 0x0d1e83, 0x0d1f83, 0x0d0043, /* 160-163 */
+ 0x0d0143, 0x0d0243, 0x0d0343, 0x0d0443, /* 164-167 */
+ 0x0d0543, 0x0d0643, 0x0d0743, 0x0d0843, /* 168-171 */
+ 0x0d0943, 0x0d0a43, 0x0d0b43, 0x0d0c43, /* 172-175 */
+ 0x0d0d43, 0x0d0e43, 0x0d0f43, 0x0d1043, /* 176-179 */
+ 0x0d1143, 0x0d1243, 0x0d1343, 0x0d1443, /* 180-183 */
+ 0x0d1543, 0x0d1643, 0x0d1743, 0x0d1843, /* 184-187 */
+ 0x0d1943, 0x0d1a43, 0x0d1b43, 0x0d1c43, /* 188-191 */
+ 0x0d1d43, 0x0d1e43, 0x0d1f43, 0x0d00c3, /* 192-195 */
+ 0x0d01c3, 0x0d02c3, 0x0d03c3, 0x0d04c3, /* 196-199 */
+ 0x0d05c3, 0x0d06c3, 0x0d07c3, 0x0d08c3, /* 200-203 */
+ 0x0d09c3, 0x0d0ac3, 0x0d0bc3, 0x0d0cc3, /* 204-207 */
+ 0x0d0dc3, 0x0d0ec3, 0x0d0fc3, 0x0d10c3, /* 208-211 */
+ 0x0d11c3, 0x0d12c3, 0x0d13c3, 0x0d14c3, /* 212-215 */
+ 0x0d15c3, 0x0d16c3, 0x0d17c3, 0x0d18c3, /* 216-219 */
+ 0x0d19c3, 0x0d1ac3, 0x0d1bc3, 0x0d1cc3, /* 220-223 */
+ 0x0d1dc3, 0x0d1ec3, 0x0d1fc3, 0x0d0023, /* 224-227 */
+ 0x0d0123, 0x0d0223, 0x0d0323, 0x0d0423, /* 228-231 */
+ 0x0d0523, 0x0d0623, 0x0d0723, 0x0d0823, /* 232-235 */
+ 0x0d0923, 0x0d0a23, 0x0d0b23, 0x0d0c23, /* 236-239 */
+ 0x0d0d23, 0x0d0e23, 0x0d0f23, 0x0d1023, /* 240-243 */
+ 0x0d1123, 0x0d1223, 0x0d1323, 0x0d1423, /* 244-247 */
+ 0x0d1523, 0x0d1623, 0x0d1723, 0x0d1823, /* 248-251 */
+ 0x0d1923, 0x0d1a23, 0x0d1b23, 0x0d1c23, /* 252-255 */
+ 0x0d1d23, 0x0d1e23, 0x0800a3 /* 256-258 */
+};
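+
+/* Editorial usage sketch (not part of the upstream header): extracting the bit
+ * string for a match length from len_fh[], following the layout described
+ * above (bits 0..15 = code, bits 16..19 = number of bits).
+ *
+ *    uint32_t e    = len_fh[mlen];      // mlen in 3..258
+ *    uint32_t nbit = e >> 16;           // number of bits to emit
+ *    uint32_t code = e & 0xffff;        // LSB-first bit string
+ */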
+
+/* This horrible mess is needed to shut up the fallthrough warning since the
+ * stupid comment approach doesn't survive separate preprocessing (e.g. as
+ * used in distcc). Note that compilers which support the fallthrough attribute
+ * also support __has_attribute.
+ */
+#ifndef __fallthrough
+# ifdef __has_attribute
+# if __has_attribute(fallthrough)
+# define __fallthrough __attribute__((fallthrough))
+# else
+# define __fallthrough do { } while (0)
+# endif
+# else
+# define __fallthrough do { } while (0)
+# endif
+#endif
+
+#if !defined(__ARM_FEATURE_CRC32)
+static uint32_t crc32_fast[4][256];
+#endif
+
+static uint32_t fh_dist_table[32768];
+
+#if !defined(__ARM_FEATURE_CRC32)
+/* Make the table for a fast CRC.
+ * Not thread-safe, must be called exactly once.
+ */
+static inline void __slz_make_crc_table(void)
+{
+ uint32_t c;
+ int n, k;
+
+ for (n = 0; n < 256; n++) {
+ c = (uint32_t) n ^ 255;
+ for (k = 0; k < 8; k++) {
+ if (c & 1) {
+ c = 0xedb88320 ^ (c >> 1);
+ } else {
+ c = c >> 1;
+ }
+ }
+ crc32_fast[0][n] = c ^ 0xff000000;
+ }
+
+ /* Note: here we *do not* have to invert the bits corresponding to the
+ * byte position, because [0] already has the 8 highest bits inverted,
+ * and these bits are shifted by 8 at the end of the operation, which
+ * results in having the next 8 bits shifted in turn. That's why we
+ * have the xor in the index used just after a computation.
+ */
+ for (n = 0; n < 256; n++) {
+ crc32_fast[1][n] = 0xff000000 ^ crc32_fast[0][(0xff000000 ^ crc32_fast[0][n] ^ 0xff) & 0xff] ^ (crc32_fast[0][n] >> 8);
+ crc32_fast[2][n] = 0xff000000 ^ crc32_fast[0][(0x00ff0000 ^ crc32_fast[1][n] ^ 0xff) & 0xff] ^ (crc32_fast[1][n] >> 8);
+ crc32_fast[3][n] = 0xff000000 ^ crc32_fast[0][(0x0000ff00 ^ crc32_fast[2][n] ^ 0xff) & 0xff] ^ (crc32_fast[2][n] >> 8);
+ }
+}
+#endif
+
+/* Returns the code for distances 1 to 32768. The bit size for the next value
+ * can be found this way:
+ *
+ * bits = code >> 1;
+ * if (bits)
+ * bits--;
+ *
+ */
+static inline uint32_t dist_to_code(uint32_t l)
+{
+ uint32_t code;
+
+ code = 0;
+ switch (l) {
+ case 24577 ... 32768: code++; __fallthrough;
+ case 16385 ... 24576: code++; __fallthrough;
+ case 12289 ... 16384: code++; __fallthrough;
+ case 8193 ... 12288: code++; __fallthrough;
+ case 6145 ... 8192: code++; __fallthrough;
+ case 4097 ... 6144: code++; __fallthrough;
+ case 3073 ... 4096: code++; __fallthrough;
+ case 2049 ... 3072: code++; __fallthrough;
+ case 1537 ... 2048: code++; __fallthrough;
+ case 1025 ... 1536: code++; __fallthrough;
+ case 769 ... 1024: code++; __fallthrough;
+ case 513 ... 768: code++; __fallthrough;
+ case 385 ... 512: code++; __fallthrough;
+ case 257 ... 384: code++; __fallthrough;
+ case 193 ... 256: code++; __fallthrough;
+ case 129 ... 192: code++; __fallthrough;
+ case 97 ... 128: code++; __fallthrough;
+ case 65 ... 96: code++; __fallthrough;
+ case 49 ... 64: code++; __fallthrough;
+ case 33 ... 48: code++; __fallthrough;
+ case 25 ... 32: code++; __fallthrough;
+ case 17 ... 24: code++; __fallthrough;
+ case 13 ... 16: code++; __fallthrough;
+ case 9 ... 12: code++; __fallthrough;
+ case 7 ... 8: code++; __fallthrough;
+ case 5 ... 6: code++; __fallthrough;
+ case 4 : code++; __fallthrough;
+ case 3 : code++; __fallthrough;
+ case 2 : code++;
+ }
+
+ return code;
+}
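+
+/* Editorial worked example (not part of the upstream header) of the rule
+ * documented above, for distance 5:
+ *
+ *    uint32_t code = dist_to_code(5);   // -> 4 (covers distances 5..6)
+ *    uint32_t bits = code >> 1;         // -> 2
+ *    if (bits)
+ *            bits--;                    // -> 1 extra bit for this range
+ */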
+
+/* not thread-safe, must be called exactly once */
+static inline void __slz_prepare_dist_table()
+{
+ uint32_t dist;
+ uint32_t code;
+ uint32_t bits;
+
+ for (dist = 0; dist < sizeof(fh_dist_table) / sizeof(*fh_dist_table); dist++) {
+ code = dist_to_code(dist + 1);
+ bits = code >> 1;
+ if (bits)
+ bits--;
+
+ /* Distance codes are stored on 5 bits reversed. The RFC
+ * doesn't state that they are reversed, but it's the only
+ * way it works.
+ */
+ code = ((code & 0x01) << 4) | ((code & 0x02) << 2) |
+ (code & 0x04) |
+ ((code & 0x08) >> 2) | ((code & 0x10) >> 4);
+
+ code += (dist & ((1 << bits) - 1)) << 5;
+ fh_dist_table[dist] = (code << 5) + bits + 5;
+ }
+}
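+
+/* Editorial usage sketch (not part of the upstream header): reading back an
+ * fh_dist_table[] entry built above. The low 5 bits store the total number of
+ * bits to emit (5 + extra bits), the remaining bits store the LSB-first bit
+ * string (reversed distance code followed by the extra bits).
+ *
+ *    uint32_t e    = fh_dist_table[dist - 1];   // dist in 1..32768
+ *    uint32_t nbit = e & 31;
+ *    uint32_t code = e >> 5;
+ */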
diff --git a/include/import/slz.h b/include/import/slz.h
new file mode 100644
index 0000000..901a790
--- /dev/null
+++ b/include/import/slz.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013-2015 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _SLZ_H
+#define _SLZ_H
+
+#include <inttypes.h>
+
+/* We have two macros UNALIGNED_LE_OK and UNALIGNED_FASTER. The latter indicates
+ * that using unaligned data is faster than a simple shift. On x86 32-bit at
+ * least it is not the case as the per-byte access is 30% faster. A core2-duo on
+ * x86_64 is 7% faster to read one byte + shifting by 8 than to read one word,
+ * but a core i5 is 7% faster doing the unaligned read, so we privilege more
+ * recent implementations here.
+ */
+#if defined(__x86_64__)
+#define UNALIGNED_LE_OK
+#define UNALIGNED_FASTER
+#define USE_64BIT_QUEUE
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+#define UNALIGNED_LE_OK
+//#define UNALIGNED_FASTER
+#elif defined(__ARMEL__) && defined(__ARM_ARCH_7A__)
+#define UNALIGNED_LE_OK
+#define UNALIGNED_FASTER
+#elif defined(__ARM_ARCH_8A) || defined(__ARM_FEATURE_UNALIGNED)
+#define UNALIGNED_LE_OK
+#define UNALIGNED_FASTER
+#endif
+
+/* Log2 of the size of the hash table used for the references table. */
+#define HASH_BITS 13
+
+enum slz_state {
+ SLZ_ST_INIT, /* stream initialized */
+ SLZ_ST_EOB, /* header or end of block already sent */
+ SLZ_ST_FIXED, /* inside a fixed huffman sequence */
+ SLZ_ST_LAST, /* last block, BFINAL sent */
+	SLZ_ST_DONE, /* BFINAL+EOB sent */
+ SLZ_ST_END /* end sent (BFINAL, EOB, CRC + len) */
+};
+
+enum {
+ SLZ_FMT_GZIP, /* RFC1952: gzip envelope and crc32 for CRC */
+ SLZ_FMT_ZLIB, /* RFC1950: zlib envelope and adler-32 for CRC */
+ SLZ_FMT_DEFLATE, /* RFC1951: raw deflate, and no crc */
+};
+
+struct slz_stream {
+#ifdef USE_64BIT_QUEUE
+ uint64_t queue; /* last pending bits, LSB first */
+#else
+ uint32_t queue; /* last pending bits, LSB first */
+#endif
+ uint32_t qbits; /* number of bits in queue, < 8 on 32-bit, < 32 on 64-bit */
+ unsigned char *outbuf; /* set by encode() */
+ uint16_t state; /* one of slz_state */
+ uint8_t level:1; /* 0 = no compression, 1 = compression */
+ uint8_t format:2; /* SLZ_FMT_* */
+ uint8_t unused1; /* unused for now */
+ uint32_t crc32;
+ uint32_t ilen;
+};
+
+/* Functions specific to rfc1951 (deflate) */
+long slz_rfc1951_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more);
+int slz_rfc1951_init(struct slz_stream *strm, int level);
+int slz_rfc1951_flush(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1951_finish(struct slz_stream *strm, unsigned char *buf);
+
+/* Functions specific to rfc1952 (gzip) */
+uint32_t slz_crc32_by1(uint32_t crc, const unsigned char *buf, int len);
+uint32_t slz_crc32_by4(uint32_t crc, const unsigned char *buf, int len);
+long slz_rfc1952_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more);
+int slz_rfc1952_send_header(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1952_init(struct slz_stream *strm, int level);
+int slz_rfc1952_flush(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1952_finish(struct slz_stream *strm, unsigned char *buf);
+
+/* Functions specific to rfc1950 (zlib) */
+uint32_t slz_adler32_by1(uint32_t crc, const unsigned char *buf, int len);
+uint32_t slz_adler32_block(uint32_t crc, const unsigned char *buf, long len);
+long slz_rfc1950_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more);
+int slz_rfc1950_send_header(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1950_init(struct slz_stream *strm, int level);
+int slz_rfc1950_flush(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1950_finish(struct slz_stream *strm, unsigned char *buf);
+
+/* generic functions */
+
+/* Initializes stream <strm>. It will configure the stream to use format
+ * <format> for the data, which must be one of SLZ_FMT_*. The compression level
+ * passed in <level> is set. This value can only be 0 (no compression) or 1
+ * (compression) and other values will lead to unpredictable behaviour. The
+ * function should always return 0.
+ */
+static inline int slz_init(struct slz_stream *strm, int level, int format)
+{
+ int ret;
+
+ if (format == SLZ_FMT_GZIP)
+ ret = slz_rfc1952_init(strm, level);
+ else if (format == SLZ_FMT_ZLIB)
+ ret = slz_rfc1950_init(strm, level);
+ else { /* deflate for anything else */
+ ret = slz_rfc1951_init(strm, level);
+ strm->format = format;
+ }
+ return ret;
+}
+
+/* Encodes the block according to the format used by the stream. This means
+ * that the CRC of the input block may be computed according to the CRC32 or
+ * adler-32 algorithms. The number of output bytes is returned.
+ */
+static inline long slz_encode(struct slz_stream *strm, void *out,
+ const void *in, long ilen, int more)
+{
+ long ret;
+
+ if (strm->format == SLZ_FMT_GZIP)
+ ret = slz_rfc1952_encode(strm, (unsigned char *) out, (const unsigned char *) in, ilen, more);
+ else if (strm->format == SLZ_FMT_ZLIB)
+ ret = slz_rfc1950_encode(strm, (unsigned char *) out, (const unsigned char *) in, ilen, more);
+ else /* deflate for other ones */
+ ret = slz_rfc1951_encode(strm, (unsigned char *) out, (const unsigned char *) in, ilen, more);
+
+ return ret;
+}
+
+/* Flushes pending bits and sends the trailer for stream <strm> into buffer
+ * <buf> if needed. When it's done, the stream state is updated to SLZ_ST_END.
+ * It returns the number of bytes emitted. The trailer consists in flushing the
+ * possibly pending bits from the queue (up to 24 bits), rounding to the next
+ * byte, then 4 bytes for the CRC when doing zlib/gzip, then another 4 bytes
+ * for the input length for gzip. That may amount to 4+4+4 = 12 bytes, which the
+ * caller must ensure are available before calling the function. Note that if
+ * the initial header was never sent, it will be sent first as well (up to 10
+ * extra bytes).
+ */
+static inline int slz_finish(struct slz_stream *strm, void *buf)
+{
+ int ret;
+
+ if (strm->format == SLZ_FMT_GZIP)
+ ret = slz_rfc1952_finish(strm, (unsigned char *) buf);
+ else if (strm->format == SLZ_FMT_ZLIB)
+ ret = slz_rfc1950_finish(strm, (unsigned char *) buf);
+ else /* deflate for other ones */
+ ret = slz_rfc1951_finish(strm, (unsigned char *) buf);
+
+ return ret;
+}
+
+/* Flushes any pending data for stream <strm> into buffer <buf>, then emits an
+ * empty literal block to byte-align the output, allowing the queue to be
+ * completely flushed. Note that if the initial header was never sent, it will be sent
+ * first as well (0, 2 or 10 extra bytes). This requires that the output buffer
+ * still has this plus the size of the queue available (up to 4 bytes), plus
+ * one byte for (BFINAL,BTYPE), plus 4 bytes for LEN+NLEN, or a total of 19
+ * bytes in the worst case. The number of bytes emitted is returned. It is
+ * guaranteed that the queue is empty on return. This may cause some overhead
+ * by adding needless 5-byte blocks if called too often.
+ */
+static inline int slz_flush(struct slz_stream *strm, void *buf)
+{
+ int ret;
+
+ if (strm->format == SLZ_FMT_GZIP)
+ ret = slz_rfc1952_flush(strm, (unsigned char *) buf);
+ else if (strm->format == SLZ_FMT_ZLIB)
+ ret = slz_rfc1950_flush(strm, (unsigned char *) buf);
+ else /* deflate for other ones */
+ ret = slz_rfc1951_flush(strm, (unsigned char *) buf);
+
+ return ret;
+}
+
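+/* Editorial usage sketch (not part of the upstream header): minimal one-shot
+ * gzip compression of a buffer with the generic wrappers above. Buffer sizing
+ * is simplified; the caller must ensure "out" is large enough for the header,
+ * the encoded data and the trailer as documented above.
+ *
+ *    struct slz_stream strm;
+ *    long olen;
+ *
+ *    slz_init(&strm, 1, SLZ_FMT_GZIP);              // level 1 = compress
+ *    olen  = slz_encode(&strm, out, in, ilen, 0);   // more=0: last chunk
+ *    olen += slz_finish(&strm, out + olen);         // flush bits + trailer
+ */
+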
+#endif
diff --git a/include/import/xxhash.h b/include/import/xxhash.h
new file mode 100644
index 0000000..a18e8c7
--- /dev/null
+++ b/include/import/xxhash.h
@@ -0,0 +1,6773 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Header File
+ * Copyright (C) 2012-2021 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ * - xxHash homepage: https://www.xxhash.com
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*!
+ * @mainpage xxHash
+ *
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
+ * limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ * - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ * 32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
+ * 64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ * - Modern 64-bit and 128-bit hash function family which features improved
+ * strength and performance across the board, especially on smaller data.
+ * It benefits greatly from SIMD and 64-bit without requiring it.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
+ *
+ * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 |
+ * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 |
+ * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 |
+ * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 |
+ * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 |
+ * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 |
+ * | RAM sequential read | | N/A | 28.0 GB/s | N/A |
+ * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 |
+ * | City64 | | 64 | 22.0 GB/s | 76.6 |
+ * | T1ha2 | | 64 | 22.0 GB/s | 99.0 |
+ * | City128 | | 128 | 21.7 GB/s | 57.7 |
+ * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 |
+ * | XXH64() | | 64 | 19.4 GB/s | 71.0 |
+ * | SpookyHash | | 64 | 19.3 GB/s | 53.2 |
+ * | Mum | | 64 | 18.0 GB/s | 67.0 |
+ * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 |
+ * | XXH32() | | 32 | 9.7 GB/s | 71.9 |
+ * | City32 | | 32 | 9.1 GB/s | 66.0 |
+ * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 |
+ * | Murmur3 | | 32 | 3.9 GB/s | 56.1 |
+ * | SipHash* | | 64 | 3.0 GB/s | 43.2 |
+ * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 |
+ * | HighwayHash | | 64 | 1.4 GB/s | 6.0 |
+ * | FNV64 | | 64 | 1.2 GB/s | 62.7 |
+ * | Blake2* | | 256 | 1.1 GB/s | 5.1 |
+ * | SHA1* | | 160 | 0.8 GB/s | 5.6 |
+ * | MD5* | | 128 | 0.6 GB/s | 7.8 |
+ * @note
+ * - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
+ * even though it is mandatory on x64.
+ * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
+ * by modern standards.
+ * - Small data velocity is a rough average of algorithm's efficiency for small
+ * data. For more accurate information, see the wiki.
+ * - More benchmarks and strength tests are found on the wiki:
+ * https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ * For functions which take an input and length parameter, the following
+ * requirements are assumed:
+ * - The range from [`input`, `input + length`) is valid, readable memory.
+ * - The only exception is if the `length` is `0`, `input` may be `NULL`.
+ * - For C++, the objects must have the *TriviallyCopyable* property, as the
+ * functions access bytes directly as if it was an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which hashes a null terminated string with XXH32().
+ * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ * {
+ * // NULL pointers are only valid if the length is zero
+ * size_t length = (string == NULL) ? 0 : strlen(string);
+ * return XXH32(string, length, seed);
+ * }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of data of unknown size,
+ * even larger than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <assert.h>
+ * #include "xxhash.h"
+ * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ * XXH64_hash_t hashFile(FILE* f)
+ * {
+ * // Allocate a state struct. Do not just use malloc() or new.
+ * XXH3_state_t* state = XXH3_createState();
+ * assert(state != NULL && "Out of memory!");
+ * // Reset the state to start a new hashing session.
+ * XXH3_64bits_reset(state);
+ * char buffer[4096];
+ * size_t count;
+ * // Read the file in chunks
+ * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ * // Run update() as many times as necessary to process the data
+ * XXH3_64bits_update(state, buffer, count);
+ * }
+ * // Retrieve the finalized hash. This will not change the state.
+ * XXH64_hash_t result = XXH3_64bits_digest(state);
+ * // Free the state. Do not use free().
+ * XXH3_freeState(state);
+ * return result;
+ * }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ * INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #define XXH_IMPLEMENTATION
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`.
+ *
+ * Use these build macros to inline xxhash into the target unit.
+ * Inlining improves performance on small inputs, especially when the length is
+ * expressed as a compile-time constant:
+ *
+ * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
+ *
+ * It also keeps xxHash symbols private to the unit, so they are not exported.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * @endcode
+ * Do not compile and link xxhash.o as a separate object, as it is not useful.
+ */
+# define XXH_INLINE_ALL
+# undef XXH_INLINE_ALL
+/*!
+ * @brief Exposes the implementation without marking functions as inline.
+ */
+# define XXH_PRIVATE_API
+# undef XXH_PRIVATE_API
+/*!
+ * @brief Emulate a namespace by transparently prefixing all symbols.
+ *
+ * If you want to include _and expose_ xxHash functions from within your own
+ * library, but also want to avoid symbol collisions with other libraries which
+ * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
+ * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
+ * (therefore, avoid empty or numeric values).
+ *
+ * Note that no change is required within the calling program as long as it
+ * includes `xxhash.h`: Regular symbol names will be automatically translated
+ * by this header.
+ */
+# define XXH_NAMESPACE /* YOUR NAME HERE */
+# undef XXH_NAMESPACE
+#endif
+
+#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
+ && !defined(XXH_INLINE_ALL_31684351384)
+ /* this section should be traversed only once */
+# define XXH_INLINE_ALL_31684351384
+ /* give access to the advanced API, required to compile implementations */
+# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
+# define XXH_STATIC_LINKING_ONLY
+ /* make all functions private */
+# undef XXH_PUBLIC_API
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+ /* note: this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+
+ /*
+ * This part deals with the special case where a unit wants to inline xxHash,
+ * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
+ * such as part of some previously included *.h header file.
+ * Without further action, the new include would just be ignored,
+ * and functions would effectively _not_ be inlined (silent failure).
+ * The following macros solve this situation by prefixing all inlined names,
+ * avoiding naming collision with previous inclusions.
+ */
+ /* Before that, we unconditionally #undef all symbols,
+ * in case they were already defined with XXH_NAMESPACE.
+ * They will then be redefined for XXH_INLINE_ALL
+ */
+# undef XXH_versionNumber
+ /* XXH32 */
+# undef XXH32
+# undef XXH32_createState
+# undef XXH32_freeState
+# undef XXH32_reset
+# undef XXH32_update
+# undef XXH32_digest
+# undef XXH32_copyState
+# undef XXH32_canonicalFromHash
+# undef XXH32_hashFromCanonical
+ /* XXH64 */
+# undef XXH64
+# undef XXH64_createState
+# undef XXH64_freeState
+# undef XXH64_reset
+# undef XXH64_update
+# undef XXH64_digest
+# undef XXH64_copyState
+# undef XXH64_canonicalFromHash
+# undef XXH64_hashFromCanonical
+ /* XXH3_64bits */
+# undef XXH3_64bits
+# undef XXH3_64bits_withSecret
+# undef XXH3_64bits_withSeed
+# undef XXH3_64bits_withSecretandSeed
+# undef XXH3_createState
+# undef XXH3_freeState
+# undef XXH3_copyState
+# undef XXH3_64bits_reset
+# undef XXH3_64bits_reset_withSeed
+# undef XXH3_64bits_reset_withSecret
+# undef XXH3_64bits_update
+# undef XXH3_64bits_digest
+# undef XXH3_generateSecret
+ /* XXH3_128bits */
+# undef XXH128
+# undef XXH3_128bits
+# undef XXH3_128bits_withSeed
+# undef XXH3_128bits_withSecret
+# undef XXH3_128bits_reset
+# undef XXH3_128bits_reset_withSeed
+# undef XXH3_128bits_reset_withSecret
+# undef XXH3_128bits_reset_withSecretandSeed
+# undef XXH3_128bits_update
+# undef XXH3_128bits_digest
+# undef XXH128_isEqual
+# undef XXH128_cmp
+# undef XXH128_canonicalFromHash
+# undef XXH128_hashFromCanonical
+ /* Finally, free the namespace itself */
+# undef XXH_NAMESPACE
+
+ /* employ the namespace for XXH_INLINE_ALL */
+# define XXH_NAMESPACE XXH_INLINE_
+ /*
+ * Some identifiers (enums, type names) are not symbols,
+ * but they must nonetheless be renamed to avoid redeclaration.
+ * Alternative solution: do not redeclare them.
+ * However, this requires some #ifdefs, and has a more dispersed impact.
+ * Meanwhile, renaming can be achieved in a single place.
+ */
+# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
+# define XXH_OK XXH_IPREF(XXH_OK)
+# define XXH_ERROR XXH_IPREF(XXH_ERROR)
+# define XXH_errorcode XXH_IPREF(XXH_errorcode)
+# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
+# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
+# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
+# define XXH32_state_s XXH_IPREF(XXH32_state_s)
+# define XXH32_state_t XXH_IPREF(XXH32_state_t)
+# define XXH64_state_s XXH_IPREF(XXH64_state_s)
+# define XXH64_state_t XXH_IPREF(XXH64_state_t)
+# define XXH3_state_s XXH_IPREF(XXH3_state_s)
+# define XXH3_state_t XXH_IPREF(XXH3_state_t)
+# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
+ /* Ensure the header is parsed again, even if it was previously included */
+# undef XXHASH_H_5627135585666179
+# undef XXHASH_H_STATIC_13879238742
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+
+/* ****************************************************************
+ * Stable API
+ *****************************************************************/
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+/*! @brief Marks a global symbol. */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+/* XXH32 */
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+/* XXH64 */
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+/* XXH3_64bits */
+# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
+# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
+# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
+# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
+# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
+# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
+# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
+# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
+# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
+# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
+# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
+# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
+# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
+/* XXH3_128bits */
+# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
+# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF __attribute__((const))
+# define XXH_PUREF __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 8
+#define XXH_VERSION_RELEASE 2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to the header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+* Common basic types
+******************************/
+#include <stddef.h> /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+ XXH_OK = 0, /*!< OK */
+ XXH_ERROR /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+* 32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint32_t XXH32_hash_t;
+
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+ typedef unsigned int XXH32_hash_t;
+# elif ULONG_MAX == 0xFFFFFFFFUL
+ typedef unsigned long XXH32_hash_t;
+# else
+# error "unsupported platform: need a 32-bit type"
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ * XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ * Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ * and 64-bit systems, and offers true 64/128 bit hash results.
+ *
+ * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
+ * @see @ref XXH32_impl for implementation details
+ * @{
+ */
+
+/*!
+ * @brief Calculates the 32-bit hash of @p input using xxHash32.
+ *
+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
+ *
+ * See @ref single_shot_example "Single Shot Example" for an example.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 32-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 32-bit hash value.
+ *
+ * @see
+ * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ * Direct equivalents for the other variants of xxHash.
+ * @see
+ * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
+
+#ifndef XXH_NO_STREAM
+/*!
+ * Streaming functions generate the xxHash value from an incremental input.
+ * This method is slower than single-call functions, due to state management.
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * An XXH state must first be allocated using `XXH*_createState()`.
+ *
+ * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
+ *
+ * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
+ *
+ * The function returns an error code, with 0 meaning OK, and any other value
+ * meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
+ * This function returns the nn-bits hash as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a
+ * digest, and generate new hash values later on by invoking `XXH*_digest()`.
+ *
+ * When done, release the state using `XXH*_freeState()`.
+ *
+ * @see streaming_example at the top of @ref xxhash.h for an example.
+ */
+
+/*!
+ * @typedef struct XXH32_state_s XXH32_state_t
+ * @brief The opaque state struct for the XXH32 streaming API.
+ *
+ * @see XXH32_state_s for details.
+ */
+typedef struct XXH32_state_s XXH32_state_t;
+
+/*!
+ * @brief Allocates an @ref XXH32_state_t.
+ *
+ * Must be freed with XXH32_freeState().
+ * @return An allocated XXH32_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
+/*!
+ * @brief Frees an @ref XXH32_state_t.
+ *
+ * Must be allocated with XXH32_createState().
+ * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+/*!
+ * @brief Copies one @ref XXH32_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH32_state_t to begin a new hash.
+ *
+ * This function resets and seeds a state. Call it before @ref XXH32_update().
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 32-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH32_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH32_state_t.
+ *
+ * @note
+ * Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash32 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*
+ * The default return values from XXH functions are unsigned 32-bit and 64-bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what is the order on the byte level,
+ * since little and big endian conventions will store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ */
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+ unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
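+
+/*
+ * A short sketch of the canonical conversion described above; the helper name
+ * print_xxh32_hex() is illustrative only. The digest bytes are written in the
+ * portable big-endian order.
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * static void print_xxh32_hex(XXH32_hash_t hash)
+ * {
+ *     XXH32_canonical_t canonical;
+ *     size_t i;
+ *     XXH32_canonicalFromHash(&canonical, hash);   // store big-endian bytes
+ *     for (i = 0; i < sizeof(canonical.digest); i++)
+ *         printf("%02x", canonical.digest[i]);
+ *     printf("\n");
+ * }
+ * @endcode
+ */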
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to the correct value once it has been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+* 64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t XXH64_hash_t;
+#else
+# include <limits.h>
+# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+ /* LP64 ABI says uint64_t is unsigned long */
+ typedef unsigned long XXH64_hash_t;
+# else
+ /* the following type must have a width of 64-bit */
+ typedef unsigned long long XXH64_hash_t;
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ * XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ * and offers true 64/128 bit hash results.
+ * It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit hash.
+ *
+ * @see
+ * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ * Direct equivalents for the other variants of xxHash.
+ * @see
+ * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
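+
+/*
+ * One-shot usage sketch; hash_c_string() is an illustrative helper, not part
+ * of the xxHash API.
+ *
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * static XXH64_hash_t hash_c_string(const char* s, XXH64_hash_t seed)
+ * {
+ *     return XXH64(s, strlen(s), seed);   // single call, no state to manage
+ * }
+ * @endcode
+ */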
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*!
+ * @brief The opaque state struct for the XXH64 streaming API.
+ *
+ * @see XXH64_state_s for details.
+ */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+
+/*!
+ * @brief Allocates an @ref XXH64_state_t.
+ *
+ * Must be freed with XXH64_freeState().
+ * @return An allocated XXH64_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
+
+/*!
+ * @brief Frees an @ref XXH64_state_t.
+ *
+ * Must be allocated with XXH64_createState().
+ * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH64_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH64_state_t to begin a new hash.
+ *
+ * This function resets and seeds a state. Call it before @ref XXH64_update().
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH64_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH64_state_t.
+ *
+ * @note
+ * Calling XXH64_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash64 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+/******* Canonical representation *******/
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
+ */
+typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
+ *
+ * @param dst The @ref XXH64_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH64_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
+ *
+ * @param src The @ref XXH64_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
+
+#ifndef XXH_NO_XXH3
+
+/*!
+ * @}
+ * ************************************************************************
+ * @defgroup XXH3_family XXH3 family
+ * @ingroup public
+ * @{
+ *
+ * XXH3 is a more recent hash algorithm featuring:
+ * - Improved speed for both small and large inputs
+ * - True 64-bit and 128-bit outputs
+ * - SIMD acceleration
+ * - Improved 32-bit viability
+ *
+ * Speed analysis methodology is explained here:
+ *
+ * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
+ *
+ * Compared to XXH64, expect XXH3 to run approximately
+ * ~2x faster on large inputs and >3x faster on small ones,
+ * though exact differences vary depending on the platform.
+ *
+ * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
+ * but does not require it.
+ * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
+ * at competitive speeds, even without vector support. Further details are
+ * explained in the implementation.
+ *
+ * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
+ * implementations for many common platforms:
+ * - AVX512
+ * - AVX2
+ * - SSE2
+ * - ARM NEON
+ * - WebAssembly SIMD128
+ * - POWER8 VSX
+ * - s390x ZVector
+ * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
+ * selects the best version according to predefined macros. For the x86 family, an
+ * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
+ *
+ * XXH3 implementation is portable:
+ * it has a generic C90 formulation that can be compiled on any platform,
+ * all implementations generate exactly the same hash value on all platforms.
+ * Starting from v0.8.0, it's also labelled "stable", meaning that
+ * any future version will also generate the same hash value.
+ *
+ * XXH3 offers 2 variants, _64bits and _128bits.
+ *
+ * When only 64 bits are needed, prefer invoking the _64bits variant, as it
+ * reduces the amount of mixing, resulting in faster speed on small inputs.
+ * It's also generally simpler to manipulate a scalar return type than a struct.
+ *
+ * The API supports one-shot hashing, streaming mode, and custom secrets.
+ */
+/*-**********************************************************************
+* XXH3 64-bit variant
+************************************************************************/
+
+/*!
+ * @brief 64-bit unseeded variant of XXH3.
+ *
+ * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms
+ * @see
+ * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see
+ * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief 64-bit seeded variant of XXH3
+ *
+ * This variant generates a custom secret on the fly, based on the default
+ * secret altered using the `seed` value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
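+
+/*
+ * A small sketch of the note above: a zero seed produces the same value as the
+ * unseeded variant. check_zero_seed() is an illustrative name.
+ *
+ * @code{.c}
+ * #include <assert.h>
+ * #include "xxhash.h"
+ *
+ * static void check_zero_seed(const void* data, size_t len)
+ * {
+ *     assert(XXH3_64bits_withSeed(data, len, 0) == XXH3_64bits(data, len));
+ * }
+ * @endcode
+ */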
+
+/*!
+ * The bare minimum size for a custom secret.
+ *
+ * @see
+ * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+ * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+
+/*!
+ * @brief 64-bit variant of XXH3 with a custom "secret".
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing "XXH3_generateSecret()" instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ */
+
+/*!
+ * @brief The state struct for the XXH3 streaming API.
+ *
+ * @see XXH3_state_s for details.
+ */
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced, and it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
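+
+/*
+ * A minimal streaming sketch for the XXH3 64-bit variant; hash_chunks() and its
+ * parameters are illustrative only. The seed is supplied at reset time, so the
+ * digest matches XXH3_64bits_withSeed() over the concatenated input.
+ *
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * static XXH64_hash_t hash_chunks(const void* const* chunks, const size_t* sizes,
+ *                                 size_t count, XXH64_hash_t seed)
+ * {
+ *     XXH64_hash_t result = 0;
+ *     size_t i;
+ *     XXH3_state_t* const state = XXH3_createState();
+ *     if (state == NULL) return 0;
+ *     if (XXH3_64bits_reset_withSeed(state, seed) == XXH_OK) {
+ *         for (i = 0; i < count; i++)
+ *             if (XXH3_64bits_update(state, chunks[i], sizes[i]) != XXH_OK)
+ *                 break;
+ *         if (i == count)
+ *             result = XXH3_64bits_digest(state);
+ *     }
+ *     XXH3_freeState(state);
+ *     return result;
+ * }
+ * @endcode
+ */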
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+* XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+ XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+ XXH64_hash_t high64; /*!< `value >> 64` */
+} XXH128_hash_t;
+
+/*!
+ * @brief Unseeded 128-bit variant of XXH3
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
+ * @see
+ * XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see
+ * XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
+/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have same meaning as their 64-bit counterpart.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update().
+ * Digest will be equivalent to `XXH3_128bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update().
+ * Digest will be equivalent to `XXH3_128bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_reset_withSecret(). */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* The following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL. */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ *
+ * @return: >0 if *h128_1 > *h128_2
+ * =0 if *h128_1 == *h128_2
+ * <0 if *h128_1 < *h128_2
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
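+
+/*
+ * Sketch: since XXH128_cmp() follows the qsort()/bsearch() contract, an array
+ * of 128-bit hashes can be sorted with it directly. sort_hashes() is an
+ * illustrative name.
+ *
+ * @code{.c}
+ * #include <stdlib.h>
+ * #include "xxhash.h"
+ *
+ * static void sort_hashes(XXH128_hash_t* hashes, size_t count)
+ * {
+ *     qsort(hashes, count, sizeof(*hashes), XXH128_cmp);
+ * }
+ * @endcode
+ */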
+
+
+/******* Canonical representation *******/
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
+
+
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
+
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+
+/*!
+ * @}
+ */
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ ***************************************************************************** */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
+ * Never **ever** access their members directly.
+ */
+
+/*!
+ * @internal
+ * @brief Structure for XXH32 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH32_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH64_state_s, XXH3_state_s
+ */
+struct XXH32_state_s {
+ XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
+ XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
+ XXH32_hash_t v[4]; /*!< Accumulator lanes */
+ XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
+ XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
+}; /* typedef'd to XXH32_state_t */
+
+
+#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
+
+/*!
+ * @internal
+ * @brief Structure for XXH64 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH64_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH32_state_s, XXH3_state_s
+ */
+struct XXH64_state_s {
+ XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
+ XXH64_hash_t v[4]; /*!< Accumulator lanes */
+ XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
+ XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/
+ XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
+}; /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+# include <stdalign.h>
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__GNUC__)
+# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+# define XXH_ALIGN(n) __declspec(align(n))
+#else
+# define XXH_ALIGN(n) /* disabled */
+#endif
+
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
+ && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+ && defined(__GNUC__)
+# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
+
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64b_update(), XXH3_128b_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
+
+/*!
+ * @internal
+ * @brief Structure for XXH3 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with a dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * as it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+ XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+ /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+ /*!< Used to store a custom secret generated from a seed. */
+ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+ /*!< The internal buffer. @see XXH32_state_s::mem32 */
+ XXH32_hash_t bufferedSize;
+ /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+ XXH32_hash_t useSeed;
+ /*!< Reserved field. Needed for padding on 64-bit. */
+ size_t nbStripesSoFar;
+    /*!< Number of stripes processed. */
+ XXH64_hash_t totalLen;
+ /*!< Total length hashed. 64-bit even on 32-bit targets. */
+ size_t nbStripesPerBlock;
+ /*!< Number of stripes per block. */
+ size_t secretLimit;
+ /*!< Size of @ref customSecret or @ref extSecret */
+ XXH64_hash_t seed;
+ /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+ XXH64_hash_t reserved64;
+ /*!< Reserved field. */
+ const unsigned char* extSecret;
+ /*!< Reference to an external secret for the _withSecret variants, NULL
+ * for other variants. */
+ /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on the stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr) \
+ do { \
+ XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+ tmp_xxh3_state_ptr->seed = 0; \
+ tmp_xxh3_state_ptr->extSecret = NULL; \
+ } while(0)
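+
+/*
+ * Stack-allocation sketch (static linking only); hash_on_stack() is an
+ * illustrative helper. Because the first reset below uses a seed, the state is
+ * initialized with XXH3_INITSTATE() first, as explained above.
+ *
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #include "xxhash.h"
+ *
+ * static XXH64_hash_t hash_on_stack(const void* data, size_t len, XXH64_hash_t seed)
+ * {
+ *     XXH3_state_t state;              // stack allocation, no malloc()
+ *     XXH3_INITSTATE(&state);          // required before a _withSeed reset
+ *     if (XXH3_64bits_reset_withSeed(&state, seed) != XXH_OK) return 0;
+ *     if (XXH3_64bits_update(&state, data, len) != XXH_OK) return 0;
+ *     return XXH3_64bits_digest(&state);
+ * }
+ * @endcode
+ */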
+
+
+/*!
+ * simple alias to pre-selected XXH3_128bits variant
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than a 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (i.e. consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <string.h>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Hashes argv[2] using the entropy from argv[1].
+ * int main(int argc, char* argv[])
+ * {
+ * char secret[XXH3_SECRET_SIZE_MIN];
+ *     if (argc != 3) { return 1; }
+ * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ * XXH64_hash_t h = XXH3_64bits_withSecret(
+ * argv[2], strlen(argv[2]),
+ * secret, sizeof(secret)
+ * );
+ * printf("%016llx\n", (unsigned long long) h);
+ * }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ * #include <string>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Slow, seeds each time
+ * class HashSlow {
+ * XXH64_hash_t seed;
+ * public:
+ * HashSlow(XXH64_hash_t s) : seed{s} {}
+ * size_t operator()(const std::string& x) const {
+ * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ * }
+ * };
+ * // Fast, caches the seeded secret for future uses.
+ * class HashFast {
+ * unsigned char secret[XXH3_SECRET_SIZE_MIN];
+ * public:
+ * HashFast(XXH64_hash_t s) {
+ *         XXH3_generateSecret_fromSeed(secret, s);
+ * }
+ * size_t operator()(const std::string& x) const {
+ * return size_t{
+ * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ * };
+ * }
+ * };
+ * @endcode
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_SIZE_MIN bytes
+ * @param seed The seed to seed the state.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance to swap each bit in the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed);
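+
+/*
+ * Minimal sketch of the combined variant: assuming `secret` was previously
+ * derived from `seed` (e.g. with XXH3_generateSecret_fromSeed()), large inputs
+ * are hashed without regenerating the secret while small inputs still use the
+ * seed. hash_cached() is an illustrative name.
+ *
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * static XXH64_hash_t hash_cached(const void* data, size_t len,
+ *                                 const void* secret, size_t secretSize,
+ *                                 XXH64_hash_t seed)
+ * {
+ *     return XXH3_64bits_withSecretandSeed(data, len, secret, secretSize, seed);
+ * }
+ * @endcode
+ */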
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#endif /* !XXH_NO_STREAM */
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# define XXH_IMPLEMENTATION
+#endif
+
+#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
+
+
+/* ======================================================================== */
+/* ======================================================================== */
+/* ======================================================================== */
+
+
+/*-**********************************************************************
+ * xxHash implementation
+ *-**********************************************************************
+ * xxHash's implementation used to be hosted inside xxhash.c.
+ *
+ * However, inlining requires implementation to be visible to the compiler,
+ * hence be included alongside the header.
+ * Previously, implementation was hosted inside xxhash.c,
+ * which was then #included when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in /include directory.
+ *
+ * xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
+ *
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
+ ************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+ || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+# define XXH_IMPLEM_13a8737387
+
+/* *************************************
+* Tuning parameters
+***************************************/
+
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
+ */
+# define XXH_NO_LONG_LONG
+# undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The switch below allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ * @par
+ * Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ * eliminate the function call and treat it as an unaligned access.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ * @par
+ * Depends on compiler extensions and is therefore not portable.
+ * This method is safe _if_ your compiler supports it,
+ * and *generally* as fast or faster than `memcpy`.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ * @par
+ * Casts directly and dereferences. This method doesn't depend on the
+ * compiler, but it violates the C standard as it directly dereferences an
+ * unaligned pointer. It can generate buggy code on targets which do not
+ * support unaligned memory accesses, but in some circumstances, it's the
+ * only known way to get the most performance.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ * @par
+ * Also portable. This can generate the best code on old compilers which don't
+ * inline small `memcpy()` calls, and it might also be faster on big-endian
+ * systems which lack a native byteswap instruction. However, some compilers
+ * will emit literal byteshifts even if the target supports unaligned access.
+ *
+ *
+ * @warning
+ * Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ * care, as what works on one compiler/platform/optimization level may cause
+ * another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+# define XXH_FORCE_MEMORY_ACCESS 0
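+
+/*
+ * Illustrative override (a sketch): the macro can be defined before including
+ * the header, or on the compiler command line, e.g. to request the byteshift
+ * method:
+ *
+ * @code{.c}
+ * #define XXH_FORCE_MEMORY_ACCESS 3   // byteshift loads
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * @endcode
+ */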
+
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage to forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. When using the `-Os`
+ * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
+ * otherwise it is defined to 0.
+ *
+ * Most of these size optimizations can be controlled manually.
+ *
+ * This is a number from 0-2.
+ * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
+ * comes first.
+ * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
+ * conservative and disables hacks that increase code size. It implies the
+ * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
+ * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
+ * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
+ * Performance may cry. For example, the single shot functions just use the
+ * streaming API.
+ */
+# define XXH_SIZE_OPT 0
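+
+/*
+ * Sketch of a manual override (illustrative): request the smallest code size
+ * regardless of the optimization level used.
+ *
+ * @code{.c}
+ * #define XXH_SIZE_OPT 2    // smallest code; single shot reuses the streaming API
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * @endcode
+ */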
+
+/*!
+ * @def XXH_FORCE_ALIGN_CHECK
+ * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
+ * and XXH64() only).
+ *
+ * This is an important performance trick for architectures without decent
+ * unaligned memory access performance.
+ *
+ * It checks for input alignment, and when conditions are met, uses a "fast
+ * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
+ * faster_ read speed.
+ *
+ * The check costs one initial branch per hash, which is generally negligible,
+ * but not zero.
+ *
+ * Moreover, it's not useful to generate an additional code path if memory
+ * access uses the same instruction for both aligned and unaligned
+ * addresses (e.g. x86 and aarch64).
+ *
+ * In these cases, the alignment check can be removed by setting this macro to 0.
+ * Then the code will always use unaligned memory access.
+ * The alignment check is automatically disabled on x86, x64, ARM64, and some ARM
+ * chips, which are platforms known to offer good unaligned memory access performance.
+ *
+ * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
+ *
+ * This option does not affect XXH3 (only XXH32 and XXH64).
+ */
+# define XXH_FORCE_ALIGN_CHECK 0
+
+/*!
+ * @def XXH_NO_INLINE_HINTS
+ * @brief When non-zero, sets all functions to `static`.
+ *
+ * By default, xxHash tries to force the compiler to inline almost all internal
+ * functions.
+ *
+ * This can usually improve performance due to reduced jumping and improved
+ * constant folding, but significantly increases the size of the binary which
+ * might not be favorable.
+ *
+ * Additionally, sometimes the forced inlining can be detrimental to performance,
+ * depending on the architecture.
+ *
+ * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
+ * compiler full control on whether to inline or not.
+ *
+ * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
+ * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
+ */
+# define XXH_NO_INLINE_HINTS 0
+
+/*!
+ * @def XXH3_INLINE_SECRET
+ * @brief Determines whether to inline the XXH3 withSecret code.
+ *
+ * When the secret size is known, the compiler can improve the performance
+ * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
+ *
+ * However, if the secret size is not known, it doesn't have any benefit. This
+ * happens when xxHash is compiled into a global symbol. Therefore, if
+ * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
+ *
+ * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
+ * that are *sometimes* force inlined on -Og, and it is impossible to automatically
+ * detect this optimization level.
+ */
+# define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This is generally preferable for performance,
+ * but depending on exact architecture, a jmp may be preferable.
+ *
+ * This setting is only likely to make a difference for very small inputs.
+ */
+# define XXH32_ENDJMP 0
+
+/*!
+ * @internal
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
+ */
+# define XXH_OLD_NAMES
+# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used, disabling
+ * the streaming functions can improve code size significantly, especially with
+ * the @ref XXH3_family which tends to make constant folded copies of itself.
+ */
+# define XXH_NO_STREAM
+# undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
+
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+   /* Prefer __packed__ structures (method 1) for GCC.
+    * On < ARMv7 with unaligned access (e.g. Raspbian armhf), GCC still emits byte
+    * shifting for packed accesses, so we use memcpy(), which for some reason does
+    * perform unaligned loads. */
+# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+ /* default to 1 for -Os or -Oz */
+# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+# define XXH_SIZE_OPT 1
+# else
+# define XXH_SIZE_OPT 0
+# endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+ /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+# if XXH_SIZE_OPT >= 1 || \
+ defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
+# define XXH_NO_INLINE_HINTS 1
+# else
+# define XXH_NO_INLINE_HINTS 0
+# endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+ || !defined(XXH_INLINE_ALL)
+# define XXH3_INLINE_SECRET 0
+# else
+# define XXH3_INLINE_SECRET 1
+# endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+# define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
+ * the library loses the ability to invoke malloc() / free().
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into some kernel, embedded or limited environment
+ * without access to dynamic allocation.
+ */
+
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than free().
+ */
+static void XXH_free(void* p) { free(p); }
+
+#endif /* XXH_NO_STDLIB */
+
+#include <string.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+ return memcpy(dest,src,size);
+}
+
+#include <limits.h> /* ULLONG_MAX */
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS /* disable inlining hints */
+# if defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __attribute__((unused))
+# else
+# define XXH_FORCE_INLINE static
+# endif
+# define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+# define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER) /* Visual Studio */
+# define XXH_FORCE_INLINE static __forceinline
+# define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
+# define XXH_FORCE_INLINE static inline
+# define XXH_NO_INLINE static
+#else
+# define XXH_FORCE_INLINE static
+# define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+* Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+# ifdef DEBUGLEVEL /* backwards compat */
+# define XXH_DEBUGLEVEL DEBUGLEVEL
+# else
+# define XXH_DEBUGLEVEL 0
+# endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+# include <assert.h> /* note: can still be disabled with NDEBUG */
+# define XXH_ASSERT(c) assert(c)
+#else
+# if defined(__INTEL_COMPILER)
+# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
+# else
+# define XXH_ASSERT(c) XXH_ASSUME(c)
+# endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+# else
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+# endif
+# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
+
+/*!
+ * @internal
+ * @def XXH_COMPILER_GUARD(var)
+ * @brief Used to prevent unwanted optimizations for @p var.
+ *
+ * It uses an empty GCC inline assembly statement with a register constraint
+ * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
+ * on x86) and marks it as modified.
+ *
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
+ * and _usually_ isn't wanted elsewhere.
+ *
+ * We also use it to prevent unwanted constant folding for AArch64 in
+ * XXH3_initCustomSecret_scalar().
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
+#else
+# define XXH_COMPILER_GUARD(var) ((void)0)
+#endif
+
+/* Specifically for NEON vectors which use the "w" constraint, on
+ * Clang. */
+#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
+#else
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
+#endif
+
+/* *************************************
+* Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t xxh_u8;
+#else
+ typedef unsigned char xxh_u8;
+#endif
+typedef XXH32_hash_t xxh_u32;
+
+#ifdef XXH_OLD_NAMES
+# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
+# define BYTE xxh_u8
+# define U8 xxh_u8
+# define U32 xxh_u32
+#endif
+
+/* *** Memory access *** */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_read32(const void* ptr)
+ * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit native endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readBE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit big endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
+ * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
+ * always @ref XXH_alignment::XXH_unaligned.
+ *
+ * @param ptr The pointer to read from.
+ * @param align Whether @p ptr is aligned.
+ * @pre
+ * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
+ * aligned.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE32 and XXH_readBE32.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/*
+ * Force direct memory access. Only works on CPUs which support unaligned memory
+ * access in hardware.
+ */
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+#endif
+static xxh_u32 XXH_read32(const void* ptr)
+{
+ typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
+ return *((const xxh_unalign32*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u32 XXH_read32(const void* memPtr)
+{
+ xxh_u32 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+
+/* *** Endianness *** */
+
+/*!
+ * @ingroup tuning
+ * @def XXH_CPU_LITTLE_ENDIAN
+ * @brief Whether the target is little endian.
+ *
+ * Defined to 1 if the target is little endian, or 0 if it is big endian.
+ * It can be defined externally, for example on the compiler command line.
+ *
+ * If it is not defined,
+ * a runtime check (which is usually constant folded) is used instead.
+ *
+ * @note
+ * This is not necessarily defined to an integer constant.
+ *
+ * @see XXH_isLittleEndian() for the runtime check.
+ */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+/*
+ * Try to detect endianness automatically, to avoid the nonstandard behavior
+ * in `XXH_isLittleEndian()`
+ */
+# if defined(_WIN32) /* Windows is always little endian */ \
+ || defined(__LITTLE_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# elif defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 0
+# else
+/*!
+ * @internal
+ * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
+ *
+ * Most compilers will constant fold this.
+ */
+static int XXH_isLittleEndian(void)
+{
+ /*
+ * Portable and well-defined behavior.
+ * Don't use static: it is detrimental to performance.
+ */
+ const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
+ return one.c[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+# endif
+#endif
+
+
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifdef __has_builtin
+# define XXH_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define XXH_HAS_BUILTIN(x) 0
+#endif
+
+
+
+/*
+ * C23 and future versions have standard "unreachable()".
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * # include <stddef.h>
+ * # ifdef unreachable
+ * # define XXH_UNREACHABLE() unreachable()
+ * # endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * # include <utility>
+ * # define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks
+ * doesn't work on GCC12
+ */
+
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+# define XXH_UNREACHABLE() __builtin_unreachable()
+
+#elif defined(_MSC_VER)
+# define XXH_UNREACHABLE() __assume(0)
+
+#else
+# define XXH_UNREACHABLE()
+#endif
+
+#if XXH_HAS_BUILTIN(__builtin_assume)
+# define XXH_ASSUME(c) __builtin_assume(c)
+#else
+# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
+
+/*!
+ * @internal
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
+ *
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ * @p r > 0 && @p r < 32
+ * @note
+ * @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
+ */
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+ && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+# define XXH_rotl32 __builtin_rotateleft32
+# define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
+#elif defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
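+
+/* For example, XXH_rotl32(0x00000001, 13) == 0x00002000 and
+ * XXH_rotl32(0x80000000, 1) == 0x00000001: bits shifted out on the left
+ * re-enter on the right. The precondition 0 < r < 32 matters for the generic
+ * fallback above, where r == 0 would produce an undefined shift by 32 bits. */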
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+
+/*!
+ * @internal
+ * @brief Enum to indicate whether a pointer is aligned.
+ */
+typedef enum {
+ XXH_aligned, /*!< Aligned */
+ XXH_unaligned /*!< Possibly unaligned */
+} XXH_alignment;
+
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u32)bytePtr[1] << 8)
+ | ((xxh_u32)bytePtr[2] << 16)
+ | ((xxh_u32)bytePtr[3] << 24);
+}
+
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[3]
+ | ((xxh_u32)bytePtr[2] << 8)
+ | ((xxh_u32)bytePtr[1] << 16)
+ | ((xxh_u32)bytePtr[0] << 24);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+}
+
+static xxh_u32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u32
+XXH_readLE32_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned) {
+ return XXH_readLE32(ptr);
+ } else {
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
+ }
+}
+
+
+/* *************************************
+* Misc
+***************************************/
+/*! @ingroup public */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+* 32-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @defgroup XXH32_impl XXH32 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH32 implementation.
+ * @{
+ */
+ /* #define instead of static const, to be used as initializers */
+#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
+#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
+#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
+#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
+#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME32_1 XXH_PRIME32_1
+# define PRIME32_2 XXH_PRIME32_2
+# define PRIME32_3 XXH_PRIME32_3
+# define PRIME32_4 XXH_PRIME32_4
+# define PRIME32_5 XXH_PRIME32_5
+#endif
+
+/*!
+ * @internal
+ * @brief Normal stripe processing routine.
+ *
+ * This shuffles the bits so that any bit from @p input impacts several bits in
+ * @p acc.
+ *
+ * @param acc The accumulator lane.
+ * @param input The stripe of input to mix.
+ * @return The mixed accumulator lane.
+ */
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
+{
+ acc += input * XXH_PRIME32_2;
+ acc = XXH_rotl32(acc, 13);
+ acc *= XXH_PRIME32_1;
+#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
+ /*
+ * UGLY HACK:
+ * A compiler fence is the only thing that prevents GCC and Clang from
+ * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
+ * reason) without globally disabling SSE4.1.
+ *
+ * The reason we want to avoid vectorization is because despite working on
+ * 4 integers at a time, there are multiple factors slowing XXH32 down on
+ * SSE4:
+ * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
+ * newer chips!) making it slightly slower to multiply four integers at
+     *   once compared to four integers independently. Even on Sandy/Ivy Bridge,
+     *   where pmulld was fastest, it is still not worth going into SSE
+     *   just to multiply unless doing a long operation.
+ *
+ * - Four instructions are required to rotate,
+     *      movdqa  tmp,  v        // not required with VEX encoding
+ * pslld tmp, 13 // tmp <<= 13
+ * psrld v, 19 // x >>= 19
+ * por v, tmp // x |= tmp
+ * compared to one for scalar:
+ * roll v, 13 // reliably fast across the board
+ * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
+ *
+ * - Instruction level parallelism is actually more beneficial here because
+ * the SIMD actually serializes this operation: While v1 is rotating, v2
+ * can load data, while v3 can multiply. SSE forces them to operate
+ * together.
+ *
+ * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
+ * the loop. NEON is only faster on the A53, and with the newer cores, it is less
+ * than half the speed.
+ *
+ * Additionally, this is used on WASM SIMD128 because it JITs to the same
+ * SIMD instructions and has the same issue.
+ */
+ XXH_COMPILER_GUARD(acc);
+#endif
+ return acc;
+}
+
+/*!
+ * @internal
+ * @brief Mixes all bits to finalize the hash.
+ *
+ * The final mix ensures that all input bits have a chance to impact any bit in
+ * the output digest, resulting in an unbiased distribution.
+ *
+ * @param hash The hash to avalanche.
+ * @return The avalanched hash.
+ */
+static xxh_u32 XXH32_avalanche(xxh_u32 hash)
+{
+ hash ^= hash >> 15;
+ hash *= XXH_PRIME32_2;
+ hash ^= hash >> 13;
+ hash *= XXH_PRIME32_3;
+ hash ^= hash >> 16;
+ return hash;
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-15 bytes of @p ptr.
+ *
+ * There may be up to 15 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 16.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash.
+ * @see XXH64_finalize().
+ */
+static XXH_PUREF xxh_u32
+XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+#define XXH_PROCESS1 do { \
+ hash += (*ptr++) * XXH_PRIME32_5; \
+ hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
+} while (0)
+
+#define XXH_PROCESS4 do { \
+ hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
+ ptr += 4; \
+ hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
+} while (0)
+
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+
+ /* Compact rerolled version; generally faster */
+ if (!XXH32_ENDJMP) {
+ len &= 15;
+ while (len >= 4) {
+ XXH_PROCESS4;
+ len -= 4;
+ }
+ while (len > 0) {
+ XXH_PROCESS1;
+ --len;
+ }
+ return XXH32_avalanche(hash);
+ } else {
+ switch(len&15) /* or switch(bEnd - p) */ {
+ case 12: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 8: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 4: XXH_PROCESS4;
+ return XXH32_avalanche(hash);
+
+ case 13: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 9: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 5: XXH_PROCESS4;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 14: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 10: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 6: XXH_PROCESS4;
+ XXH_PROCESS1;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 15: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 11: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 7: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 3: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 2: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 1: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 0: return XXH32_avalanche(hash);
+ }
+ XXH_ASSERT(0);
+ return hash; /* reaching this point is deemed impossible */
+ }
+}
+
+#ifdef XXH_OLD_NAMES
+# define PROCESS1 XXH_PROCESS1
+# define PROCESS4 XXH_PROCESS4
+#else
+# undef XXH_PROCESS1
+# undef XXH_PROCESS4
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH32().
+ *
+ * @param input , len , seed Directly passed from @ref XXH32().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
+{
+ xxh_u32 h32;
+
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=16) {
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 15;
+ xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ xxh_u32 v2 = seed + XXH_PRIME32_2;
+ xxh_u32 v3 = seed + 0;
+ xxh_u32 v4 = seed - XXH_PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+ } while (input < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + XXH_PRIME32_5;
+ }
+
+ h32 += (xxh_u32)len;
+
+ return XXH32_finalize(h32, input, len&15, align);
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, (const xxh_u8*)input, len);
+ return XXH32_digest(&state);
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
+}
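+
+/* A minimal usage sketch of the one-shot API above (buffer and seed values
+ * are arbitrary):
+ *
+ *     const char msg[] = "hello";
+ *     XXH32_hash_t h = XXH32(msg, sizeof(msg) - 1, 0);
+ *
+ * The same input, length and seed always yield the same 32-bit value,
+ * regardless of the platform's endianness or the buffer's alignment.
+ */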
+
+
+
+/******* Hash streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ statePtr->v[1] = seed + XXH_PRIME32_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME32_1;
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH32_update(XXH32_state_t* state, const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len_32 += (XXH32_hash_t)len;
+ state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const xxh_u32* p32 = state->mem32;
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const xxh_u8* const limit = bEnd - 16;
+
+ do {
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
+ } while (p<=limit);
+
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
+{
+ xxh_u32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v[0], 1)
+ + XXH_rotl32(state->v[1], 7)
+ + XXH_rotl32(state->v[2], 12)
+ + XXH_rotl32(state->v[3], 18);
+ } else {
+ h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
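+
+/* A minimal sketch of the streaming API above, hashing an input in two chunks
+ * (error handling omitted; chunk1/chunk2 and their lengths are placeholders):
+ *
+ *     XXH32_state_t* st = XXH32_createState();
+ *     XXH32_reset(st, 0);
+ *     XXH32_update(st, chunk1, len1);
+ *     XXH32_update(st, chunk2, len2);
+ *     XXH32_hash_t h = XXH32_digest(st);
+ *     XXH32_freeState(st);
+ *
+ * The result is identical to XXH32() applied to the concatenation of the
+ * chunks with the same seed.
+ */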
+
+/******* Canonical representation *******/
+
+/*!
+ * @ingroup XXH32_family
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ *
+ * The canonical representation uses big endian convention, the same convention
+ * as human-readable numbers (large digits first).
+ *
+ * This way, hash values can be written into a file or buffer, remaining
+ * comparable across different systems.
+ *
+ * The following functions allow transformation of hash values to and from their
+ * canonical format.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
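+
+/* A minimal sketch of the round trip described above (h being any
+ * XXH32_hash_t value): the canonical form is a fixed big endian byte
+ * sequence, safe to store or transmit, and converting it back restores the
+ * original value on any platform:
+ *
+ *     XXH32_canonical_t c;
+ *     XXH32_canonicalFromHash(&c, h);
+ *     XXH32_hash_t h2 = XXH32_hashFromCanonical(&c);   // h2 == h
+ */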
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @ingroup impl
+ * @{
+ */
+/******* Memory access *******/
+
+typedef XXH64_hash_t xxh_u64;
+
+#ifdef XXH_OLD_NAMES
+# define U64 xxh_u64
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE64 and XXH_readBE64.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ return *(const xxh_u64*) memPtr;
+}
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+#endif
+static xxh_u64 XXH_read64(const void* ptr)
+{
+ typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
+ return *((const xxh_unalign64*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ xxh_u64 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+static xxh_u64 XXH_swap64(xxh_u64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u64)bytePtr[1] << 8)
+ | ((xxh_u64)bytePtr[2] << 16)
+ | ((xxh_u64)bytePtr[3] << 24)
+ | ((xxh_u64)bytePtr[4] << 32)
+ | ((xxh_u64)bytePtr[5] << 40)
+ | ((xxh_u64)bytePtr[6] << 48)
+ | ((xxh_u64)bytePtr[7] << 56);
+}
+
+XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[7]
+ | ((xxh_u64)bytePtr[6] << 8)
+ | ((xxh_u64)bytePtr[5] << 16)
+ | ((xxh_u64)bytePtr[4] << 24)
+ | ((xxh_u64)bytePtr[3] << 32)
+ | ((xxh_u64)bytePtr[2] << 40)
+ | ((xxh_u64)bytePtr[1] << 48)
+ | ((xxh_u64)bytePtr[0] << 56);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+}
+
+static xxh_u64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u64
+XXH_readLE64_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return XXH_readLE64(ptr);
+ else
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
+}
+
+
+/******* xxh64 *******/
+/*!
+ * @}
+ * @defgroup XXH64_impl XXH64 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH64 implementation.
+ * @{
+ */
+/* #define rather than static const, to be used as initializers */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME64_1 XXH_PRIME64_1
+# define PRIME64_2 XXH_PRIME64_2
+# define PRIME64_3 XXH_PRIME64_3
+# define PRIME64_4 XXH_PRIME64_4
+# define PRIME64_5 XXH_PRIME64_5
+#endif
+
+/*! @copydoc XXH32_round */
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+ acc += input * XXH_PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= XXH_PRIME64_1;
+ return acc;
+}
+
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
+ return acc;
+}
+
+/*! @copydoc XXH32_avalanche */
+static xxh_u64 XXH64_avalanche(xxh_u64 hash)
+{
+ hash ^= hash >> 33;
+ hash *= XXH_PRIME64_2;
+ hash ^= hash >> 29;
+ hash *= XXH_PRIME64_3;
+ hash ^= hash >> 32;
+ return hash;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-31 bytes of @p ptr.
+ *
+ * There may be up to 31 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 32.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash
+ * @see XXH32_finalize().
+ */
+static XXH_PUREF xxh_u64
+XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+ len &= 31;
+ while (len >= 8) {
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
+ ptr += 8;
+ hash ^= k1;
+ hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ len -= 8;
+ }
+ if (len >= 4) {
+ hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
+ ptr += 4;
+ hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ len -= 4;
+ }
+ while (len > 0) {
+ hash ^= (*ptr++) * XXH_PRIME64_5;
+ hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
+ --len;
+ }
+ return XXH64_avalanche(hash);
+}
+
+#ifdef XXH_OLD_NAMES
+# define PROCESS1_64 XXH_PROCESS1_64
+# define PROCESS4_64 XXH_PROCESS4_64
+# define PROCESS8_64 XXH_PROCESS8_64
+#else
+# undef XXH_PROCESS1_64
+# undef XXH_PROCESS4_64
+# undef XXH_PROCESS8_64
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH64().
+ *
+ * @param input , len , seed Directly passed from @ref XXH64().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+ xxh_u64 h64;
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=32) {
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 31;
+ xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ xxh_u64 v2 = seed + XXH_PRIME64_2;
+ xxh_u64 v3 = seed + 0;
+ xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+ } while (input<limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + XXH_PRIME64_5;
+ }
+
+ h64 += (xxh_u64) len;
+
+ return XXH64_finalize(h64, input, len, align);
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, (const xxh_u8*)input, len);
+ return XXH64_digest(&state);
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/******* Hash Streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ statePtr->v[1] = seed + XXH_PRIME64_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME64_1;
+ return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
+ state->memsize += (xxh_u32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
+ p += 32 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const xxh_u8* const limit = bEnd - 32;
+
+ do {
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
+ } while (p<=limit);
+
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
+{
+ xxh_u64 h64;
+
+ if (state->total_len >= 32) {
+ h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
+ h64 = XXH64_mergeRound(h64, state->v[0]);
+ h64 = XXH64_mergeRound(h64, state->v[1]);
+ h64 = XXH64_mergeRound(h64, state->v[2]);
+ h64 = XXH64_mergeRound(h64, state->v[3]);
+ } else {
+ h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
+ }
+
+ h64 += (xxh_u64) state->total_len;
+
+ return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
+
+#ifndef XXH_NO_XXH3
+
+/* *********************************************************************
+* XXH3
+* New generation hash designed for speed on small keys and vectorization
+************************************************************************ */
+/*!
+ * @}
+ * @defgroup XXH3_impl XXH3 implementation
+ * @ingroup impl
+ * @{
+ */
+
+/* === Compiler specifics === */
+
+#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
+# define XXH_RESTRICT /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
+# define XXH_RESTRICT restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+ || (defined (__clang__)) \
+ || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+ || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+# define XXH_RESTRICT __restrict
+#else
+# define XXH_RESTRICT /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
+ || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+ || defined(__clang__)
+# define XXH_likely(x) __builtin_expect(x, 1)
+# define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+# define XXH_likely(x) (x)
+# define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+# ifdef __has_include
+# define XXH_HAS_INCLUDE(x) __has_include(x)
+# else
+# define XXH_HAS_INCLUDE(x) 0
+# endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# if defined(__ARM_FEATURE_SVE)
+# include <arm_sve.h>
+# endif
+# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+ || (defined(_M_ARM) && _M_ARM >= 7) \
+ || defined(_M_ARM64) || defined(_M_ARM64EC) \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+# define inline __inline__ /* circumvent a clang bug */
+# include <arm_neon.h>
+# undef inline
+# elif defined(__AVX2__)
+# include <immintrin.h>
+# elif defined(__SSE2__)
+# include <emmintrin.h>
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ * xxh_u64 x;
+ * x ^= (x >> 47); // good
+ * x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ * x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ * // note: funnel shifts are not usually cheap.
+ * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ * x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ * - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ * 32 bits in the shift.
+ * - The shift result will always fit in the lower 32 bits, and therefore,
+ * we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ * - Usable unaligned access
+ * - A 32-bit or 64-bit ALU
+ * - If 32-bit, a decent ADC instruction
+ * - A 32 or 64-bit multiply with a 64-bit result
+ * - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
+ * Lo registers, and this shuffling results in thousands more MOVs than A32.
+ *
+ * A32 and T32 don't have this limitation. They can access all 14 registers,
+ * do a 32->64 multiply with UMULL, and the flexible operand allowing free
+ * shifts is helpful, too.
+ *
+ * Therefore, we do a quick sanity check.
+ *
+ * If compiling Thumb-1 for a target which supports ARM instructions, we will
+ * emit a warning, as it is not a "sane" platform to compile for.
+ *
+ * Usually, if this happens, it is because of an accident and you probably need
+ * to specify -march, as you likely meant to compile for a newer architecture.
+ *
+ * Credit: large sections of the vectorial and asm source code paths
+ * have been contributed by @easyaspi314
+ */
+#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
+# warning "XXH3 is highly inefficient without ARM or Thumb-2."
+#endif
+
+/* ==========================================
+ * Vectorization detection
+ * ========================================== */
+
+#ifdef XXH_DOXYGEN
+/*!
+ * @ingroup tuning
+ * @brief Overrides the vectorization implementation chosen for XXH3.
+ *
+ * Can be defined to 0 to disable SIMD or any of the values mentioned in
+ * @ref XXH_VECTOR_TYPE.
+ *
+ * If this is not defined, it uses predefined macros to determine the best
+ * implementation.
+ */
+# define XXH_VECTOR XXH_SCALAR
+/*!
+ * @ingroup tuning
+ * @brief Possible values for @ref XXH_VECTOR.
+ *
+ * Note that these are actually implemented as macros.
+ *
+ * If this is not defined, it is detected automatically.
+ * The internal macro XXH_X86DISPATCH overrides this.
+ */
+enum XXH_VECTOR_TYPE /* fake enum */ {
+ XXH_SCALAR = 0, /*!< Portable scalar version */
+ XXH_SSE2 = 1, /*!<
+ * SSE2 for Pentium 4, Opteron, all x86_64.
+ *
+ * @note SSE2 is also guaranteed on Windows 10, macOS, and
+ * Android x86.
+ */
+ XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
+ XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
+ XXH_NEON = 4, /*!<
+ * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
+ * via the SIMDeverywhere polyfill provided with the
+ * Emscripten SDK.
+ */
+ XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
+ XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */
+};
+/*!
+ * @ingroup tuning
+ * @brief Selects the minimum alignment for XXH3's accumulators.
+ *
+ * When using SIMD, this should match the alignment required for said vector
+ * type, so, for example, 32 for AVX2.
+ *
+ * Default: Auto detected.
+ */
+# define XXH_ACC_ALIGN 8
+#endif
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+# define XXH_SCALAR 0
+# define XXH_SSE2 1
+# define XXH_AVX2 2
+# define XXH_AVX512 3
+# define XXH_NEON 4
+# define XXH_VSX 5
+# define XXH_SVE 6
+#endif
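+
+/* As documented above, XXH_VECTOR may be forced from the command line to pick
+ * a specific code path; for instance, a build wanting only the portable path
+ * could use something along the lines of:
+ *
+ *     cc -DXXH_VECTOR=XXH_SCALAR ...   (equivalently -DXXH_VECTOR=0)
+ */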
+
+#ifndef XXH_VECTOR /* can be defined on command line */
+# if defined(__ARM_FEATURE_SVE)
+# define XXH_VECTOR XXH_SVE
+# elif ( \
+ defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+ || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+ ) && ( \
+ defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+ )
+# define XXH_VECTOR XXH_NEON
+# elif defined(__AVX512F__)
+# define XXH_VECTOR XXH_AVX512
+# elif defined(__AVX2__)
+# define XXH_VECTOR XXH_AVX2
+# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+# define XXH_VECTOR XXH_SSE2
+# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+ || (defined(__s390x__) && defined(__VEC__)) \
+ && defined(__GNUC__) /* TODO: IBM XL */
+# define XXH_VECTOR XXH_VSX
+# else
+# define XXH_VECTOR XXH_SCALAR
+# endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+# ifdef _MSC_VER
+# pragma warning(once : 4606)
+# else
+# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+# endif
+# undef XXH_VECTOR
+# define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+# if defined(XXH_X86DISPATCH)
+# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
+# elif XXH_VECTOR == XXH_SCALAR /* scalar */
+# define XXH_ACC_ALIGN 8
+# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
+# define XXH_ACC_ALIGN 32
+# elif XXH_VECTOR == XXH_NEON /* neon */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_VSX /* vsx */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
+# define XXH_ACC_ALIGN 64
+# elif XXH_VECTOR == XXH_SVE /* sve */
+# define XXH_ACC_ALIGN 64
+# endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+ || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+# define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_ALIASING __attribute__((may_alias))
+#else
+# define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ * -O2 -mavx2 -march=haswell
+ * or
+ * -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ *
+ * Fortunately, we can control the first one with a pragma that forces GCC into
+ * -O2, but the other one we can't control without "failed to inline always
+ * inline function due to target mismatch" warnings.
+ */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC push_options
+# pragma GCC optimize("-O2")
+#endif
+
+#if XXH_VECTOR == XXH_NEON
+
+/*
+ * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
+ * optimizes out the entire hashLong loop because of the aliasing violation.
+ *
+ * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
+ * so the only option is to mark it as aliasing.
+ */
+typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
+
+/*!
+ * @internal
+ * @brief `vld1q_u64` but faster and alignment-safe.
+ *
+ * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
+ * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
+ *
+ * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
+ * prohibits load-store optimizations. Therefore, a direct dereference is used.
+ *
+ * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
+ * unaligned load.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
+{
+ return *(xxh_aliasing_uint64x2_t const *)ptr;
+}
+#else
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
+{
+ return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
+}
+#endif
+
+/*!
+ * @internal
+ * @brief `vmlal_u32` on low and high halves of a vector.
+ *
+ * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with
+ * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32`
+ * with `vmlal_u32`.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* Inline assembly is the only way */
+ __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
+ return acc;
+}
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* This intrinsic works as expected */
+ return vmlal_high_u32(acc, lhs, rhs);
+}
+#else
+/* Portable intrinsic versions */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
+}
+/*! @copydoc XXH_vmlal_low_u32
+ * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
+}
+#endif
+
+/*!
+ * @ingroup tuning
+ * @brief Controls the NEON to scalar ratio for XXH3
+ *
+ * This can be set to 2, 4, 6, or 8.
+ *
+ * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
+ *
+ * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
+ * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
+ * bandwidth.
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
+ * |:----------------------|:--------------------|----------:|-----------:|------:|
+ * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
+ * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
+ * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
+ * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
+ * meaning it effectively becomes a slower version of 4.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+ && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+# define XXH3_NEON_LANES 6
+# else
+# define XXH3_NEON_LANES XXH_ACC_NB
+# endif
+# endif
+#endif /* XXH_VECTOR == XXH_NEON */
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+# pragma push_macro("bool")
+# pragma push_macro("vector")
+# pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+# undef bool
+# undef vector
+# undef pixel
+
+# if defined(__s390x__)
+# include <s390intrin.h>
+# else
+# include <altivec.h>
+# endif
+
+/* Restore the original macro values, if applicable. */
+# pragma pop_macro("pixel")
+# pragma pop_macro("vector")
+# pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+# if defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_VSX_BE 1
+# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+# warning "-maltivec=be is not recommended. Please use native endianness."
+# define XXH_VSX_BE 1
+# else
+# define XXH_VSX_BE 0
+# endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+# define XXH_vec_revb vec_revb
+# else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+ xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+ return vec_perm(val, val, vByteSwap);
+}
+# endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+ xxh_u64x2 ret;
+ XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+ ret = XXH_vec_revb(ret);
+# endif
+ return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian-dependent. Also, their meanings swap depending on the version.
+ * */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+# define XXH_vec_mulo vec_mulo
+# define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
+ /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
+# define XXH_vec_mulo __builtin_altivec_vmulouw
+# define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+ svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
+ svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
+ svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
+ svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
+ svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
+ svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
+ svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+ acc = svadd_u64_x(mask, acc, mul); \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+#else
+# if XXH_SIZE_OPT >= 1
+# define XXH_PREFETCH(ptr) (void)(ptr)
+# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
+# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# else
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+# endif
+#endif /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
+
+#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
+# error "default keyset is not large enough"
+#endif
+
+/*! Pseudorandom secret taken directly from FARSH. */
+XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+ 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
+ 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
+ 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
+ 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
+ 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
+ 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
+ 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
+ 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
+ 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
+ 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
+ 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
+ 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
+};
+
+static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
+static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */
+
+#ifdef XXH_OLD_NAMES
+# define kSecret XXH3_kSecret
+#endif
+
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Calculates a 32-bit to 64-bit long multiply.
+ *
+ * Implemented as a macro.
+ *
+ * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
+ * need to (but it shouldn't need to anyways, it is about 7 instructions to do
+ * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
+ * use that instead of the normal method.
+ *
+ * If you are compiling for platforms like Thumb-1 and don't have a better option,
+ * you may also want to write your own long multiply routine here.
+ *
+ * @param x, y Numbers to be multiplied
+ * @return 64-bit product of the low 32 bits of @p x and @p y.
+ */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64(xxh_u64 x, xxh_u64 y)
+{
+ return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
+}
+#elif defined(_MSC_VER) && defined(_M_IX86)
+# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
+#else
+/*
+ * Downcast + upcast is usually better than masking on older compilers like
+ * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
+ *
+ * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
+ * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
+ */
+# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
+#endif
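+/* For illustration: XXH_mult32to64(0xFFFFFFFF, 2) == 0x1FFFFFFFEULL; only the
+ * low 32 bits of each operand contribute to the 64-bit product. */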
+
+/*!
+ * @brief Calculates a 64->128-bit long multiply.
+ *
+ * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
+ * version.
+ *
+ * @param lhs , rhs The 64-bit integers to be multiplied
+ * @return The 128-bit result represented in an @ref XXH128_hash_t.
+ */
+static XXH128_hash_t
+XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
+{
+ /*
+ * GCC/Clang __uint128_t method.
+ *
+ * On most 64-bit targets, GCC and Clang define a __uint128_t type.
+ * This is usually the best way as it usually uses a native long 64-bit
+ * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
+ *
+ * Usually.
+ *
+     * On some 32-bit platforms (notably Emscripten), Clang defines this type
+     * despite not having native arithmetic for it. This results in a slow
+ * compiler builtin call which calculates a full 128-bit multiply.
+ * In that case it is best to use the portable one.
+ * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
+ */
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
+ && defined(__SIZEOF_INT128__) \
+ || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+
+ __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
+ XXH128_hash_t r128;
+ r128.low64 = (xxh_u64)(product);
+ r128.high64 = (xxh_u64)(product >> 64);
+ return r128;
+
+ /*
+ * MSVC for x64's _umul128 method.
+ *
+ * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
+ *
+ * This compiles to single operand MUL on x64.
+ */
+#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(_umul128)
+#endif
+ xxh_u64 product_high;
+ xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
+ XXH128_hash_t r128;
+ r128.low64 = product_low;
+ r128.high64 = product_high;
+ return r128;
+
+ /*
+ * MSVC for ARM64's __umulh method.
+ *
+ * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
+ */
+#elif defined(_M_ARM64) || defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(__umulh)
+#endif
+ XXH128_hash_t r128;
+ r128.low64 = lhs * rhs;
+ r128.high64 = __umulh(lhs, rhs);
+ return r128;
+
+#else
+ /*
+ * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
+ *
+ * This is a fast and simple grade school multiply, which is shown below
+ * with base 10 arithmetic instead of base 0x100000000.
+ *
+ * 9 3 // D2 lhs = 93
+ * x 7 5 // D2 rhs = 75
+ * ----------
+ * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
+ * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
+ * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
+ * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
+ * ---------
+ * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
+ * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
+ * ---------
+ * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
+ *
+ * The reasons for adding the products like this are:
+ * 1. It avoids manual carry tracking. Just like how
+ * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
+ * This avoids a lot of complexity.
+ *
+ * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
+ * instruction available in ARM's Digital Signal Processing extension
+ * in 32-bit ARMv6 and later, which is shown below:
+ *
+ * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
+ * {
+ * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
+ * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
+ * *RdHi = (xxh_u32)(product >> 32);
+ * }
+ *
+ * This instruction was designed for efficient long multiplication, and
+ * allows this to be calculated in only 4 instructions at speeds
+ * comparable to some 64-bit ALUs.
+ *
+ * 3. It isn't terrible on other platforms. Usually this will be a couple
+ * of 32-bit ADD/ADCs.
+ */
+
+ /* First calculate all of the cross products. */
+ xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
+ xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
+ xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
+ xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
+
+ /* Now add the products together. These will never overflow. */
+ xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+ xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+ xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+
+ XXH128_hash_t r128;
+ r128.low64 = lower;
+ r128.high64 = upper;
+ return r128;
+#endif
+}
+
+/*!
+ * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
+ *
+ * The reason for the separate function is to prevent passing too many structs
+ * around by value. This will hopefully inline the multiply, but we don't force it.
+ *
+ * @param lhs , rhs The 64-bit integers to multiply
+ * @return The low 64 bits of the product XOR'd by the high 64 bits.
+ * @see XXH_mult64to128()
+ */
+static xxh_u64
+XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
+{
+ XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
+ return product.low64 ^ product.high64;
+}
+
+/*! Seems to produce slightly better code on GCC for some reason. */
+XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
+{
+ XXH_ASSERT(0 <= shift && shift < 64);
+ return v64 ^ (v64 >> shift);
+}
+
+/*
+ * This is a fast avalanche stage,
+ * suitable when input bits are already partially mixed
+ */
+static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
+{
+ h64 = XXH_xorshift64(h64, 37);
+ h64 *= PRIME_MX1;
+ h64 = XXH_xorshift64(h64, 32);
+ return h64;
+}
+
+/*
+ * This is a stronger avalanche,
+ * inspired by Pelle Evensen's rrmxmx
+ * preferable when input has not been previously mixed
+ */
+static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
+{
+ /* this mix is inspired by Pelle Evensen's rrmxmx */
+ h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
+ h64 *= PRIME_MX2;
+ h64 ^= (h64 >> 35) + len ;
+ h64 *= PRIME_MX2;
+ return XXH_xorshift64(h64, 28);
+}
+
+
+/* ==========================================
+ * Short keys
+ * ==========================================
+ * One of the shortcomings of XXH32 and XXH64 was that their performance was
+ * sub-optimal on short lengths. They used an iterative algorithm which strongly
+ * favored lengths that were a multiple of 4 or 8.
+ *
+ * Instead of iterating over individual inputs, we use a set of single shot
+ * functions which piece together a range of lengths and operate in constant time.
+ *
+ * Additionally, the number of multiplies has been significantly reduced. This
+ * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
+ *
+ * Depending on the platform, this may or may not be faster than XXH32, but it
+ * is almost guaranteed to be faster than XXH64.
+ */
+
+/*
+ * At very short lengths, there isn't enough input to fully hide secrets, or use
+ * the entire secret.
+ *
+ * There is also only a limited amount of mixing we can do before significantly
+ * impacting performance.
+ *
+ * Therefore, we use different sections of the secret and always mix two secret
+ * samples with an XOR. This should have no effect on performance on the
+ * seedless or withSeed variants because everything _should_ be constant folded
+ * by modern compilers.
+ *
+ * The XOR mixing hides individual parts of the secret and increases entropy.
+ *
+ * This adds an extra layer of strength for custom secrets.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combined = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combined = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combined = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
+ return XXH64_avalanche(keyed);
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input1 = XXH_readLE32(input);
+ xxh_u32 const input2 = XXH_readLE32(input + len - 4);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
+ xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
+ xxh_u64 const keyed = input64 ^ bitflip;
+ return XXH3_rrmxmx(keyed, len);
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
+ xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
+ xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
+ xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
+ xxh_u64 const acc = len
+ + XXH_swap64(input_lo) + input_hi
+ + XXH3_mul128_fold64(input_lo, input_hi);
+ return XXH3_avalanche(acc);
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
+ if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
+ return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
+ }
+}
+
+/*
+ * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
+ * multiplication by zero, affecting hashes of lengths 17 to 240.
+ *
+ * However, they are very unlikely.
+ *
+ * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
+ * unseeded non-cryptographic hashes, it does not attempt to defend itself
+ * against specially crafted inputs, only random inputs.
+ *
+ * Compared to classic UMAC, where there is a 1 in 2^31 chance that 4 consecutive
+ * bytes cancel out the secret, and that chance is taken an arbitrary number of
+ * times (addressed in XXH3_accumulate_512), this collision is very unlikely with
+ * random inputs and/or proper seeding:
+ *
+ * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
+ * function that is only called up to 16 times per hash with up to 240 bytes of
+ * input.
+ *
+ * This is not too bad for a non-cryptographic hash function, especially with
+ * only 64 bit outputs.
+ *
+ * The 128-bit variant (which trades some speed for strength) is NOT affected
+ * by this, although it is always a good idea to use a proper seed if you care
+ * about strength.
+ */
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
+ const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
+{
+#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
+ /*
+ * UGLY HACK:
+ * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+ * slower code.
+ *
+ * By forcing seed64 into a register, we disrupt the cost model and
+ * cause it to scalarize. See `XXH32_round()`
+ *
+ * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+ * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+ * GCC 9.2, despite both emitting scalar code.
+ *
+ * GCC generates much better scalar code than Clang for the rest of XXH3,
+     * which is why finding a more optimal code path remains of interest.
+ */
+ XXH_COMPILER_GUARD(seed64);
+#endif
+ { xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 const input_hi = XXH_readLE64(input+8);
+ return XXH3_mul128_fold64(
+ input_lo ^ (XXH_readLE64(secret) + seed64),
+ input_hi ^ (XXH_readLE64(secret+8) - seed64)
+ );
+ }
+}
+
+/* For mid range keys, XXH3 uses a Mum-hash variant. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { xxh_u64 acc = len * XXH_PRIME64_1;
+#if XXH_SIZE_OPT >= 1
+ /* Smaller and cleaner, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
+ acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
+ } while (i-- != 0);
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc += XXH3_mix16B(input+48, secret+96, seed);
+ acc += XXH3_mix16B(input+len-64, secret+112, seed);
+ }
+ acc += XXH3_mix16B(input+32, secret+64, seed);
+ acc += XXH3_mix16B(input+len-48, secret+80, seed);
+ }
+ acc += XXH3_mix16B(input+16, secret+32, seed);
+ acc += XXH3_mix16B(input+len-32, secret+48, seed);
+ }
+ acc += XXH3_mix16B(input+0, secret+0, seed);
+ acc += XXH3_mix16B(input+len-16, secret+16, seed);
+#endif
+ return XXH3_avalanche(acc);
+ }
+}
+
+#define XXH3_MIDSIZE_MAX 240
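+/* Inputs longer than XXH3_MIDSIZE_MAX bytes leave these single-shot paths and
+ * are handled by the striped long-key code further below. */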
+
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+ #define XXH3_MIDSIZE_STARTOFFSET 3
+ #define XXH3_MIDSIZE_LASTOFFSET 17
+
+ { xxh_u64 acc = len * XXH_PRIME64_1;
+ xxh_u64 acc_end;
+ unsigned int const nbRounds = (unsigned int)len / 16;
+ unsigned int i;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+ for (i=0; i<8; i++) {
+ acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
+ }
+ /* last bytes */
+ acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+ XXH_ASSERT(nbRounds >= 8);
+ acc = XXH3_avalanche(acc);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+ /*
+ * UGLY HACK:
+     * Clang for ARMv7-A tries to vectorize this loop, similar to GCC on x86.
+     * Everywhere else, it uses scalar code.
+ *
+ * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
+ * would still be slower than UMAAL (see XXH_mult64to128).
+ *
+ * Unfortunately, Clang doesn't handle the long multiplies properly and
+ * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
+ * scalarized into an ugly mess of VMOV.32 instructions.
+ *
+     * This mess is difficult to avoid without turning autovectorization
+     * off completely, but the other instances are usually relatively minor
+     * and/or not worth fixing.
+ *
+ * This loop is the easiest to fix, as unlike XXH32, this pragma
+ * _actually works_ because it is a loop vectorization instead of an
+ * SLP vectorization.
+ */
+ #pragma clang loop vectorize(disable)
+#endif
+ for (i=8 ; i < nbRounds; i++) {
+ /*
+         * Prevents clang from unrolling the acc loop and interleaving it with this one.
+ */
+ XXH_COMPILER_GUARD(acc);
+ acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+ }
+ return XXH3_avalanche(acc + acc_end);
+ }
+}
+
+
+/* ======= Long Keys ======= */
+
+#define XXH_STRIPE_LEN 64
+#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
+#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
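+/* With a 64-byte stripe and 64-bit lanes, XXH_ACC_NB evaluates to 8 accumulator lanes. */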
+
+#ifdef XXH_OLD_NAMES
+# define STRIPE_LEN XXH_STRIPE_LEN
+# define ACC_NB XXH_ACC_NB
+#endif
+
+#ifndef XXH_PREFETCH_DIST
+# ifdef __clang__
+# define XXH_PREFETCH_DIST 320
+# else
+# if (XXH_VECTOR == XXH_AVX512)
+# define XXH_PREFETCH_DIST 512
+# else
+# define XXH_PREFETCH_DIST 384
+# endif
+# endif /* __clang__ */
+#endif /* XXH_PREFETCH_DIST */
+
+/*
+ * These macros are to generate an XXH3_accumulate() function.
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name) \
+void \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \
+ const xxh_u8* XXH_RESTRICT input, \
+ const xxh_u8* XXH_RESTRICT secret, \
+ size_t nbStripes) \
+{ \
+ size_t n; \
+ for (n = 0; n < nbStripes; n++ ) { \
+ const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \
+ XXH_PREFETCH(in + XXH_PREFETCH_DIST); \
+ XXH3_accumulate_512_##name( \
+ acc, \
+ in, \
+ secret + n*XXH_SECRET_CONSUME_RATE); \
+ } \
+}
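+/*
+ * For reference, XXH3_ACCUMULATE_TEMPLATE(scalar) expands to an
+ * XXH3_accumulate_scalar() that walks the input one 64-byte stripe at a time,
+ * prefetching XXH_PREFETCH_DIST bytes ahead and advancing the secret by
+ * XXH_SECRET_CONSUME_RATE bytes per stripe before calling
+ * XXH3_accumulate_512_scalar().
+ */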
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+ if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+ XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as an argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define the __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ typedef int64_t xxh_i64;
+#else
+ /* the following type must have a width of 64-bit */
+ typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based off of FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input to the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
+
+#if (XXH_VECTOR == XXH_AVX512) \
+ || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
+
+#ifndef XXH_TARGET_AVX512
+# define XXH_TARGET_AVX512 /* disable attribute target */
+#endif
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ __m512i* const xacc = (__m512i *) acc;
+ XXH_ASSERT((((size_t)acc) & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+
+ {
+ /* data_vec = input[0]; */
+ __m512i const data_vec = _mm512_loadu_si512 (input);
+ /* key_vec = secret[0]; */
+ __m512i const key_vec = _mm512_loadu_si512 (secret);
+ /* data_key = data_vec ^ key_vec; */
+ __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
+ /* xacc[0] += swap(data_vec); */
+ __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
+ __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
+ /* xacc[0] += product; */
+ *xacc = _mm512_add_epi64(product, sum);
+ }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
+
+/*
+ * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
+ *
+ * Multiplication isn't perfect, as explained by Google in HighwayHash:
+ *
+ * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
+ * // varying degrees. In descending order of goodness, bytes
+ * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
+ * // As expected, the upper and lower bytes are much worse.
+ *
+ * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
+ *
+ * Since our algorithm uses a pseudorandom secret to add some variance into the
+ * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
+ *
+ * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
+ * extraction.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+ { __m512i* const xacc = (__m512i*) acc;
+ const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
+
+ /* xacc[0] ^= (xacc[0] >> 47) */
+ __m512i const acc_vec = *xacc;
+ __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
+ /* xacc[0] ^= secret; */
+ __m512i const key_vec = _mm512_loadu_si512 (secret);
+ __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
+
+ /* xacc[0] *= XXH_PRIME32_1; */
+ __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
+ __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
+ __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
+ *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
+ }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
+ XXH_ASSERT(((size_t)customSecret & 63) == 0);
+ (void)(&XXH_writeLE64);
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
+ __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
+ __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
+
+ const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
+ __m512i* const dest = ( __m512i*) customSecret;
+ int i;
+ XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dest & 63) == 0);
+ for (i=0; i < nbRounds; ++i) {
+ dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
+ } }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_AVX2) \
+ || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
+
+#ifndef XXH_TARGET_AVX2
+# define XXH_TARGET_AVX2 /* disable attribute target */
+#endif
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 31) == 0);
+ { __m256i* const xacc = (__m256i *) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xinput = (const __m256i *) input;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xsecret = (const __m256i *) secret;
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+ /* data_vec = xinput[i]; */
+ __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
+ /* key_vec = xsecret[i]; */
+ __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
+ /* data_key = data_vec ^ key_vec; */
+ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
+ /* xacc[i] += swap(data_vec); */
+ __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+ __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
+ /* xacc[i] += product; */
+ xacc[i] = _mm256_add_epi64(product, sum);
+ } }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 31) == 0);
+ { __m256i* const xacc = (__m256i*) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xsecret = (const __m256i *) secret;
+ const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47) */
+ __m256i const acc_vec = xacc[i];
+ __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
+ __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
+ /* xacc[i] ^= xsecret; */
+ __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
+ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
+
+ /* xacc[i] *= XXH_PRIME32_1; */
+ __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
+ __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
+ __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
+ xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+ }
+ }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+ XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+ (void)(&XXH_writeLE64);
+ XXH_PREFETCH(customSecret);
+ { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
+
+ const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
+ __m256i* dest = ( __m256i*) customSecret;
+
+# if defined(__GNUC__) || defined(__clang__)
+ /*
+         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
+         *   - not extract the secret from SSE registers in the internal loop
+         *   - use fewer registers, and avoid pushing these registers onto the stack
+ */
+ XXH_COMPILER_GUARD(dest);
+# endif
+ XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dest & 31) == 0);
+
+        /* GCC at -O2 needs the loop unrolled manually */
+ dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
+ dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
+ dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
+ dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
+ dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
+ dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
+ }
+}
+
+#endif
+
+/* x86dispatch always generates SSE2 */
+#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
+
+#ifndef XXH_TARGET_SSE2
+# define XXH_TARGET_SSE2 /* disable attribute target */
+#endif
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ /* SSE2 is just a half-scale version of the AVX2 version. */
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ { __m128i* const xacc = (__m128i *) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xinput = (const __m128i *) input;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xsecret = (const __m128i *) secret;
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+ /* data_vec = xinput[i]; */
+ __m128i const data_vec = _mm_loadu_si128 (xinput+i);
+ /* key_vec = xsecret[i]; */
+ __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
+ /* data_key = data_vec ^ key_vec; */
+ __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
+ /* xacc[i] += swap(data_vec); */
+ __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
+ __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
+ /* xacc[i] += product; */
+ xacc[i] = _mm_add_epi64(product, sum);
+ } }
+}
+XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ { __m128i* const xacc = (__m128i*) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xsecret = (const __m128i *) secret;
+ const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47) */
+ __m128i const acc_vec = xacc[i];
+ __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
+ __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
+ /* xacc[i] ^= xsecret[i]; */
+ __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
+ __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
+
+ /* xacc[i] *= XXH_PRIME32_1; */
+ __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+ __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
+ __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
+ xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+ }
+ }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+ (void)(&XXH_writeLE64);
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+ /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
+ XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
+ __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
+# else
+ __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
+# endif
+ int i;
+
+ const void* const src16 = XXH3_kSecret;
+ __m128i* dst16 = (__m128i*) customSecret;
+# if defined(__GNUC__) || defined(__clang__)
+ /*
+     * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
+     *   - not extract the secret from SSE registers in the internal loop
+     *   - use fewer registers, and avoid pushing these registers onto the stack
+ */
+ XXH_COMPILER_GUARD(dst16);
+# endif
+ XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dst16 & 15) == 0);
+
+ for (i=0; i < nbRounds; ++i) {
+ dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
+ } }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_NEON)
+
+/* forward declarations for the scalar routines */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret, size_t lane);
+
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret, size_t lane);
+
+/*!
+ * @internal
+ * @brief The bulk processing loop for NEON and WASM SIMD128.
+ *
+ * The NEON code path is actually partially scalar when running on AArch64. This
+ * is to optimize the pipelining and can give up to a 15% speedup depending on the
+ * CPU, and it also mitigates some GCC codegen issues.
+ *
+ * @see XXH3_NEON_LANES for configuring this and details about this optimization.
+ *
+ * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
+ * integers instead of the other platforms which mask full 64-bit vectors,
+ * so the setup is more complicated than just shifting right.
+ *
+ * Additionally, there is an optimization for 4 lanes at once noted below.
+ *
+ * Since, as stated, the optimal number of lanes for Cortexes is 6,
+ * there need to be *three* versions of the accumulate operation used
+ * for the remaining 2 lanes.
+ *
+ * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
+ * nearly perfectly.
+ */
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
+ { /* GCC for darwin arm64 does not like aliasing here */
+ xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
+ /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
+ uint8_t const* xinput = (const uint8_t *) input;
+ uint8_t const* xsecret = (const uint8_t *) secret;
+
+ size_t i;
+#ifdef __wasm_simd128__
+ /*
+ * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
+ * is constant propagated, which results in it converting it to this
+ * inside the loop:
+ *
+ * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0)
+ * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
+ * ...
+ *
+ * This requires a full 32-bit address immediate (and therefore a 6 byte
+ * instruction) as well as an add for each offset.
+ *
+ * Putting an asm guard prevents it from folding (at the cost of losing
+ * the alignment hint), and uses the free offset in `v128.load` instead
+ * of adding secret_offset each time which overall reduces code size by
+ * about a kilobyte and improves performance.
+ */
+ XXH_COMPILER_GUARD(xsecret);
+#endif
+ /* Scalar lanes use the normal scalarRound routine */
+ for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
+ i = 0;
+ /* 4 NEON lanes at a time. */
+ for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
+ /* data_vec = xinput[i]; */
+ uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16));
+ uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16));
+ /* key_vec = xsecret[i]; */
+ uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16));
+ uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16));
+ /* data_swap = swap(data_vec) */
+ uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
+ uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
+ /* data_key = data_vec ^ key_vec; */
+ uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
+ uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
+
+ /*
+ * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
+ * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
+ * get one vector with the low 32 bits of each lane, and one vector
+ * with the high 32 bits of each lane.
+ *
+ * The intrinsic returns a double vector because the original ARMv7-a
+ * instruction modified both arguments in place. AArch64 and SIMD128 emit
+ * two instructions from this intrinsic.
+ *
+ * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
+ * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
+ */
+ uint32x4x2_t unzipped = vuzpq_u32(
+ vreinterpretq_u32_u64(data_key_1),
+ vreinterpretq_u32_u64(data_key_2)
+ );
+ /* data_key_lo = data_key & 0xFFFFFFFF */
+ uint32x4_t data_key_lo = unzipped.val[0];
+ /* data_key_hi = data_key >> 32 */
+ uint32x4_t data_key_hi = unzipped.val[1];
+ /*
+ * Then, we can split the vectors horizontally and multiply which, as for most
+ * widening intrinsics, have a variant that works on both high half vectors
+ * for free on AArch64. A similar instruction is available on SIMD128.
+ *
+ * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
+ */
+ uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
+ uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
+ /*
+ * Clang reorders
+ * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s
+ * c += a; // add acc.2d, acc.2d, swap.2d
+ * to
+ * c += a; // add acc.2d, acc.2d, swap.2d
+ * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s
+ *
+             * While this would make sense in theory, since the addition is faster,
+             * it is worse in practice, likely because umlal is limited to certain
+             * NEON pipelines. A compiler guard fixes this.
+ */
+ XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
+ XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
+ /* xacc[i] = acc_vec + sum; */
+ xacc[i] = vaddq_u64(xacc[i], sum_1);
+ xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
+ }
+ /* Operate on the remaining NEON lanes 2 at a time. */
+ for (; i < XXH3_NEON_LANES / 2; i++) {
+ /* data_vec = xinput[i]; */
+ uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
+ /* key_vec = xsecret[i]; */
+ uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
+ /* acc_vec_2 = swap(data_vec) */
+ uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
+ /* data_key = data_vec ^ key_vec; */
+ uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+ /* For two lanes, just use VMOVN and VSHRN. */
+ /* data_key_lo = data_key & 0xFFFFFFFF; */
+ uint32x2_t data_key_lo = vmovn_u64(data_key);
+ /* data_key_hi = data_key >> 32; */
+ uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
+ /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
+ uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
+ /* Same Clang workaround as before */
+ XXH_COMPILER_GUARD_CLANG_NEON(sum);
+ /* xacc[i] = acc_vec + sum; */
+ xacc[i] = vaddq_u64 (xacc[i], sum);
+ }
+ }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
+
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+ { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
+ uint8_t const* xsecret = (uint8_t const*) secret;
+
+ size_t i;
+ /* WASM uses operator overloads and doesn't need these. */
+#ifndef __wasm_simd128__
+ /* { prime32_1, prime32_1 } */
+ uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
+ /* { 0, prime32_1, 0, prime32_1 } */
+ uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
+#endif
+
+ /* AArch64 uses both scalar and neon at the same time */
+ for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
+ }
+ for (i=0; i < XXH3_NEON_LANES / 2; i++) {
+ /* xacc[i] ^= (xacc[i] >> 47); */
+ uint64x2_t acc_vec = xacc[i];
+ uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
+ uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
+
+ /* xacc[i] ^= xsecret[i]; */
+ uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
+ uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+ /* xacc[i] *= XXH_PRIME32_1 */
+#ifdef __wasm_simd128__
+ /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
+ xacc[i] = data_key * XXH_PRIME32_1;
+#else
+ /*
+ * Expanded version with portable NEON intrinsics
+ *
+ * lo(x) * lo(y) + (hi(x) * lo(y) << 32)
+ *
+ * prod_hi = hi(data_key) * lo(prime) << 32
+ *
+ * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector
+ * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits
+ * and avoid the shift.
+ */
+ uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
+ /* Extract low bits for vmlal_u32 */
+ uint32x2_t data_key_lo = vmovn_u64(data_key);
+ /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
+ xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
+#endif
+ }
+ }
+}
+#endif
+
+#if (XXH_VECTOR == XXH_VSX)
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ /* presumed aligned */
+ xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+ xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */
+ xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */
+ xxh_u64x2 const v32 = { 32, 32 };
+ size_t i;
+ for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+ /* data_vec = xinput[i]; */
+ xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
+ /* key_vec = xsecret[i]; */
+ xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
+ xxh_u64x2 const data_key = data_vec ^ key_vec;
+ /* shuffled = (data_key << 32) | (data_key >> 32); */
+ xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
+ /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
+ xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
+ /* acc_vec = xacc[i]; */
+ xxh_u64x2 acc_vec = xacc[i];
+ acc_vec += product;
+
+ /* swap high and low halves */
+#ifdef __s390x__
+ acc_vec += vec_permi(data_vec, data_vec, 2);
+#else
+ acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
+#endif
+ xacc[i] = acc_vec;
+ }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
+
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+ { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+ const xxh_u8* const xsecret = (const xxh_u8*) secret;
+ /* constants */
+ xxh_u64x2 const v32 = { 32, 32 };
+ xxh_u64x2 const v47 = { 47, 47 };
+ xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
+ size_t i;
+ for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47); */
+ xxh_u64x2 const acc_vec = xacc[i];
+ xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
+
+ /* xacc[i] ^= xsecret[i]; */
+ xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
+ xxh_u64x2 const data_key = data_vec ^ key_vec;
+
+ /* xacc[i] *= XXH_PRIME32_1 */
+ /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
+ xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
+ /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
+ xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
+ xacc[i] = prod_odd + (prod_even << v32);
+ } }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_SVE)
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ uint64_t *xacc = (uint64_t *)acc;
+ const uint64_t *xinput = (const uint64_t *)(const void *)input;
+ const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+ svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+ uint64_t element_count = svcntd();
+ if (element_count >= 8) {
+ svbool_t mask = svptrue_pat_b64(SV_VL8);
+ svuint64_t vacc = svld1_u64(mask, xacc);
+ ACCRND(vacc, 0);
+ svst1_u64(mask, xacc, vacc);
+ } else if (element_count == 2) { /* sve128 */
+ svbool_t mask = svptrue_pat_b64(SV_VL2);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+ svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+ svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 2);
+ ACCRND(acc2, 4);
+ ACCRND(acc3, 6);
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 2, acc1);
+ svst1_u64(mask, xacc + 4, acc2);
+ svst1_u64(mask, xacc + 6, acc3);
+ } else {
+ svbool_t mask = svptrue_pat_b64(SV_VL4);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 4);
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 4, acc1);
+ }
+}
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input,
+ const xxh_u8* XXH_RESTRICT secret,
+ size_t nbStripes)
+{
+ if (nbStripes != 0) {
+ uint64_t *xacc = (uint64_t *)acc;
+ const uint64_t *xinput = (const uint64_t *)(const void *)input;
+ const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+ svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+ uint64_t element_count = svcntd();
+ if (element_count >= 8) {
+ svbool_t mask = svptrue_pat_b64(SV_VL8);
+ svuint64_t vacc = svld1_u64(mask, xacc + 0);
+ do {
+ /* svprfd(svbool_t, void *, enum svfprop); */
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(vacc, 0);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, vacc);
+ } else if (element_count == 2) { /* sve128 */
+ svbool_t mask = svptrue_pat_b64(SV_VL2);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+ svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+ svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+ do {
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 2);
+ ACCRND(acc2, 4);
+ ACCRND(acc3, 6);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 2, acc1);
+ svst1_u64(mask, xacc + 4, acc2);
+ svst1_u64(mask, xacc + 6, acc3);
+ } else {
+ svbool_t mask = svptrue_pat_b64(SV_VL4);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+ do {
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 4);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 4, acc1);
+ }
+ }
+}
+
+#endif
+
+/* scalar variants - universal */
+
+#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
+/*
+ * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
+ * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
+ *
+ * While this might not seem like much, as AArch64 is a 64-bit architecture, only
+ * big Cortex designs have a full 64-bit multiplier.
+ *
+ * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
+ * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
+ * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
+ *
+ * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
+ * not have this penalty and does the mask automatically.
+ */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ xxh_u64 ret;
+ /* note: %x = 64-bit register, %w = 32-bit register */
+ __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
+ return ret;
+}
+#else
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
+}
+#endif
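+/* For illustration: XXH_mult32to64_add64(0x100000002ULL, 3, 10) == 2*3 + 10 == 16;
+ * both variants multiply only the low 32 bits of each operand before the 64-bit add. */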
+
+/*!
+ * @internal
+ * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* xacc = (xxh_u64*) acc;
+ xxh_u8 const* xinput = (xxh_u8 const*) input;
+ xxh_u8 const* xsecret = (xxh_u8 const*) secret;
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
+ {
+ xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
+ xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
+ xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
+ xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
+ }
+}
+
+/*!
+ * @internal
+ * @brief Processes a 64 byte block of data using the scalar path.
+ */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
+#if defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__arm__) || defined(__thumb2__)) \
+ && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
+ && XXH_SIZE_OPT <= 0
+# pragma GCC unroll 8
+#endif
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
+
+/*!
+ * @internal
+ * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
+ const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
+ XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ {
+ xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+ xxh_u64 acc64 = xacc[lane];
+ acc64 = XXH_xorshift64(acc64, 47);
+ acc64 ^= key64;
+ acc64 *= XXH_PRIME32_1;
+ xacc[lane] = acc64;
+ }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
+ }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ /*
+ * We need a separate pointer for the hack below,
+ * which requires a non-const pointer.
+ * Any decent compiler will optimize this out otherwise.
+ */
+ const xxh_u8* kSecretPtr = XXH3_kSecret;
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+ /*
+ * UGLY HACK:
+ * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+ * placed sequentially, in order, at the top of the unrolled loop.
+ *
+ * While MOVK is great for generating constants (2 cycles for a 64-bit
+ * constant compared to 4 cycles for LDR), it fights for bandwidth with
+ * the arithmetic instructions.
+ *
+ * I L S
+ * MOVK
+ * MOVK
+ * MOVK
+ * MOVK
+ * ADD
+ * SUB STR
+ * STR
+ * By forcing loads from memory (as the asm line causes the compiler to assume
+     * that kSecretPtr has been changed), the pipelines are used more
+ * efficiently:
+ * I L S
+ * LDR
+ * ADD LDR
+ * SUB STR
+ * STR
+ *
+     * See XXH3_NEON_LANES for details on the pipeline.
+ *
+ * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+ * without hack: 2654.4 MB/s
+ * with hack: 3202.9 MB/s
+ */
+ XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+ int i;
+ for (i=0; i < nbRounds; i++) {
+ /*
+ * The asm hack causes the compiler to assume that kSecretPtr aliases with
+ * customSecret, and on aarch64, this prevented LDP from merging two
+ * loads together for free. Putting the loads together before the stores
+ * properly generates LDP.
+ */
+ xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
+ xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
+ } }
+}
+
+
+typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
+typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
+typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
+
+
+#if (XXH_VECTOR == XXH_AVX512)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
+#define XXH3_accumulate XXH3_accumulate_avx512
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
+
+#elif (XXH_VECTOR == XXH_AVX2)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
+#define XXH3_accumulate XXH3_accumulate_avx2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
+
+#elif (XXH_VECTOR == XXH_SSE2)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
+#define XXH3_accumulate XXH3_accumulate_sse2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
+
+#elif (XXH_VECTOR == XXH_NEON)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_neon
+#define XXH3_accumulate XXH3_accumulate_neon
+#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#elif (XXH_VECTOR == XXH_VSX)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
+#define XXH3_accumulate XXH3_accumulate_vsx
+#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#elif (XXH_VECTOR == XXH_SVE)
+#define XXH3_accumulate_512 XXH3_accumulate_512_sve
+#define XXH3_accumulate XXH3_accumulate_sve
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#else /* scalar */
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
+#define XXH3_accumulate XXH3_accumulate_scalar
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#endif
+
+#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
+# undef XXH3_initCustomSecret
+# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+#endif
+
+XXH_FORCE_INLINE void
+XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
+ size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
+ size_t const nb_blocks = (len - 1) / block_len;
+
+ size_t n;
+
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+
+ for (n = 0; n < nb_blocks; n++) {
+ f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
+ f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
+ }
+
+ /* last partial block */
+ XXH_ASSERT(len > XXH_STRIPE_LEN);
+ { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
+ XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
+ f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
+
+ /* last stripe */
+ { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
+#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
+ XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+ } }
+}
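+/*
+ * Concrete illustration with the default 192-byte secret:
+ * nbStripesPerBlock = (192 - 64) / 8 = 16 stripes, so block_len = 16 * 64 = 1024
+ * bytes, and the accumulators are scrambled once per 1 KB block.
+ */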
+
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+ return XXH3_mul128_fold64(
+ acc[0] ^ XXH_readLE64(secret),
+ acc[1] ^ XXH_readLE64(secret+8) );
+}
+
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+ xxh_u64 result64 = start;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++) {
+ result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+ /*
+ * UGLY HACK:
+ * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+ * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+ * XXH3_64bits, len == 256, Snapdragon 835:
+ * without hack: 2063.7 MB/s
+ * with hack: 2560.7 MB/s
+ */
+ XXH_COMPILER_GUARD(result64);
+#endif
+ }
+
+ return XXH3_avalanche(result64);
+}
+
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+ XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+ const void* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit the secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, which is easier on the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of the vector loop.
+ */
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * XXH3_hashLong_64b_withSeed():
+ * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
+ * and then use this key for long mode hashing.
+ *
+ * This operation is decently fast but nonetheless costs a little bit of time.
+ * Try to avoid it whenever possible (typically when seed==0).
+ *
+ * It's important for performance that XXH3_hashLong is not inlined. Not sure
+ * why (uop cache maybe?), but the difference is large and easily measurable.
+ */
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
+ XXH64_hash_t seed,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+#if XXH_SIZE_OPT <= 0
+ if (seed == 0)
+ return XXH3_hashLong_64b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+#endif
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed);
+ return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+
+typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong64_f f_hashLong)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secretLen` condition is not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ * Also, note that function signature doesn't offer room to return an error.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
+}
+
+
+/* === Public entry point === */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
+{
+ return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+}
+
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (length <= XXH3_MIDSIZE_MAX)
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
+}
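+
+/*
+ * A minimal one-shot usage sketch of the 64-bit entry points above (buffer,
+ * seed and secret contents are illustrative only):
+ *
+ *   const char msg[] = "hello world";
+ *   XXH64_hash_t h0 = XXH3_64bits(msg, sizeof(msg) - 1);
+ *   XXH64_hash_t h1 = XXH3_64bits_withSeed(msg, sizeof(msg) - 1, 0x1234);
+ *
+ *   unsigned char secret[XXH3_SECRET_SIZE_MIN];   // >= 136 bytes required
+ *   // ... fill secret with high-entropy bytes, e.g. via XXH3_generateSecret()
+ *   XXH64_hash_t h2 = XXH3_64bits_withSecret(msg, sizeof(msg) - 1,
+ *                                            secret, sizeof(secret));
+ */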
+
+
+/* === XXH3 streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * Malloc's a pointer that is always aligned to align.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
+ * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
+ * nor, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: Functions like posix_memalign or _mm_malloc
+ * are avoided: To maintain portability, we would have to write a fallback
+ * like this anyways, and besides, testing for the existence of library
+ * functions without relying on external build tools is impossible.
+ *
+ * The method is simple: Overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+ XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+ XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
+ XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
+ { /* Overallocate to make room for manual realignment and an offset byte */
+ xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+ if (base != NULL) {
+ /*
+ * Get the offset needed to align this pointer.
+ *
+ * Even if the returned pointer is aligned, there will always be
+ * at least one byte to store the offset to the original pointer.
+ */
+ size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+ /* Add the offset for the now-aligned pointer */
+ xxh_u8* ptr = base + offset;
+
+ XXH_ASSERT((size_t)ptr % align == 0);
+
+ /* Store the offset immediately before the returned pointer. */
+ ptr[-1] = (xxh_u8)offset;
+ return ptr;
+ }
+ return NULL;
+ }
+}
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void* p)
+{
+ if (p != NULL) {
+ xxh_u8* ptr = (xxh_u8*)p;
+        /* Get the offset byte we added in XXH_alignedMalloc(). */
+ xxh_u8 offset = ptr[-1];
+ /* Free the original malloc'd pointer */
+ xxh_u8* base = ptr - offset;
+ XXH_free(base);
+ }
+}
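+
+/*
+ * A worked example of the layout above, assuming align == 64 and a base
+ * pointer returned by XXH_malloc() at the (hypothetical) address 0x1008:
+ *
+ *   offset = 64 - (0x1008 & 63) = 64 - 8 = 56
+ *   ptr    = 0x1008 + 56        = 0x1040            (64-byte aligned)
+ *   ptr[-1] = 56, so XXH_alignedFree(ptr) frees base = 0x1040 - 56 = 0x1008.
+ *
+ * When base is already aligned, offset == align, so at least one byte always
+ * sits in front of ptr to hold the offset.
+ */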
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * Must be freed with XXH3_freeState().
+ * @return An allocated XXH3_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
+{
+ XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+ if (state==NULL) return NULL;
+ XXH3_INITSTATE(state);
+ return state;
+}
+
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Frees an @ref XXH3_state_t.
+ *
+ * Must be allocated with XXH3_createState().
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
+{
+ XXH_alignedFree(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
+{
+ XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
+}
+
+static void
+XXH3_reset_internal(XXH3_state_t* statePtr,
+ XXH64_hash_t seed,
+ const void* secret, size_t secretSize)
+{
+ size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
+ size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
+ XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
+ XXH_ASSERT(statePtr != NULL);
+ /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
+ memset((char*)statePtr + initStart, 0, initLength);
+ statePtr->acc[0] = XXH_PRIME32_3;
+ statePtr->acc[1] = XXH_PRIME64_1;
+ statePtr->acc[2] = XXH_PRIME64_2;
+ statePtr->acc[3] = XXH_PRIME64_3;
+ statePtr->acc[4] = XXH_PRIME64_4;
+ statePtr->acc[5] = XXH_PRIME32_2;
+ statePtr->acc[6] = XXH_PRIME64_5;
+ statePtr->acc[7] = XXH_PRIME32_1;
+ statePtr->seed = seed;
+ statePtr->useSeed = (seed != 0);
+ statePtr->extSecret = (const unsigned char*)secret;
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
+ statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, 0, secret, secretSize);
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (seed==0) return XXH3_64bits_reset(statePtr);
+ if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
+ XXH3_initCustomSecret(statePtr->customSecret, seed);
+ XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, seed64, secret, secretSize);
+ statePtr->useSeed = 1; /* always, even if seed64==0 */
+ return XXH_OK;
+}
+
+/*!
+ * @internal
+ * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
+ *
+ * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
+ *
+ * @param acc Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock Number of stripes in a block
+ * @param input Input pointer
+ * @param nbStripes Number of stripes to process
+ * @param secret Secret pointer
+ * @param secretLimit Offset of the last block in @p secret
+ * @param f_acc Pointer to an XXH3_accumulate implementation
+ * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
+ * @return Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+ size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+ const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+ /* Process full blocks */
+ if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+ /* Process the initial partial block... */
+ size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+ do {
+ /* Accumulate and scramble */
+ f_acc(acc, input, initialSecret, nbStripesThisIter);
+ f_scramble(acc, secret + secretLimit);
+ input += nbStripesThisIter * XXH_STRIPE_LEN;
+ nbStripes -= nbStripesThisIter;
+ /* Then continue the loop with the full block size */
+ nbStripesThisIter = nbStripesPerBlock;
+ initialSecret = secret;
+ } while (nbStripes >= nbStripesPerBlock);
+ *nbStripesSoFarPtr = 0;
+ }
+ /* Process a partial block */
+ if (nbStripes > 0) {
+ f_acc(acc, input, initialSecret, nbStripes);
+ input += nbStripes * XXH_STRIPE_LEN;
+ *nbStripesSoFarPtr += nbStripes;
+ }
+ /* Return end pointer */
+ return input;
+}
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+# define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ XXH_ASSERT(state != NULL);
+ { const xxh_u8* const bEnd = input + len;
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* For some reason, gcc and MSVC seem to suffer greatly
+ * when operating accumulators directly into state.
+ * Operating into stack space seems to enable proper optimization.
+ * clang, on the other hand, doesn't seem to need this trick */
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
+ XXH_memcpy(acc, state->acc, sizeof(acc));
+#else
+ xxh_u64* XXH_RESTRICT const acc = state->acc;
+#endif
+ state->totalLen += len;
+ XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
+
+ /* small input : just fill in tmp buffer */
+ if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
+ XXH_memcpy(state->buffer + state->bufferedSize, input, len);
+ state->bufferedSize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ /* total input is now > XXH3_INTERNALBUFFER_SIZE */
+ #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
+ XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
+
+ /*
+ * Internal buffer is partially filled (always, except at beginning)
+ * Complete it, then consume it.
+ */
+ if (state->bufferedSize) {
+ size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
+ XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
+ input += loadSize;
+ XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, XXH3_INTERNALBUFFER_STRIPES,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ state->bufferedSize = 0;
+ }
+ XXH_ASSERT(input < bEnd);
+ if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
+ size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
+ input = XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ input, nbStripes,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+
+ }
+ /* Some remaining input (always) : buffer it */
+ XXH_ASSERT(input < bEnd);
+ XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
+ XXH_ASSERT(state->bufferedSize == 0);
+ XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
+ state->bufferedSize = (XXH32_hash_t)(bEnd-input);
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* save stack accumulators into state */
+ XXH_memcpy(state->acc, acc, sizeof(acc));
+#endif
+ }
+
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_update(state, (const xxh_u8*)input, len,
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+
+XXH_FORCE_INLINE void
+XXH3_digest_long (XXH64_hash_t* acc,
+ const XXH3_state_t* state,
+ const unsigned char* secret)
+{
+ xxh_u8 lastStripe[XXH_STRIPE_LEN];
+ const xxh_u8* lastStripePtr;
+
+ /*
+ * Digest on a local copy. This way, the state remains unaltered, and it can
+ * continue ingesting more input afterwards.
+ */
+ XXH_memcpy(acc, state->acc, sizeof(state->acc));
+ if (state->bufferedSize >= XXH_STRIPE_LEN) {
+ /* Consume remaining stripes then point to remaining data in buffer */
+ size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
+ size_t nbStripesSoFar = state->nbStripesSoFar;
+ XXH3_consumeStripes(acc,
+ &nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, nbStripes,
+ secret, state->secretLimit,
+ XXH3_accumulate, XXH3_scrambleAcc);
+ lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
+ } else { /* bufferedSize < XXH_STRIPE_LEN */
+ /* Copy to temp buffer */
+ size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
+ XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
+ XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
+ XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
+ lastStripePtr = lastStripe;
+ }
+ /* Last stripe */
+ XXH3_accumulate_512(acc,
+ lastStripePtr,
+ secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ return XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ }
+ /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
+ if (state->useSeed)
+ return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
+}
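+
+/*
+ * A minimal streaming usage sketch of the 64-bit API above (the chunk loop
+ * and error handling are illustrative only):
+ *
+ *   XXH3_state_t* st = XXH3_createState();
+ *   if (st == NULL || XXH3_64bits_reset_withSeed(st, 0x1234) != XXH_OK)
+ *       return;                                   // hypothetical error path
+ *   while (read_next_chunk(&chunk, &chunk_len))   // hypothetical data source
+ *       XXH3_64bits_update(st, chunk, chunk_len);
+ *   XXH64_hash_t h = XXH3_64bits_digest(st);      // state may keep ingesting
+ *   XXH3_freeState(st);
+ */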
+#endif /* !XXH_NO_STREAM */
+
+
+/* ==========================================
+ * XXH3 128 bits (a.k.a XXH128)
+ * ==========================================
+ * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
+ * even without counting the significantly larger output size.
+ *
+ * For example, extra steps are taken to avoid the seed-dependent collisions
+ * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
+ *
+ * This strength naturally comes at the cost of some speed, especially on short
+ * lengths. Note that for longer inputs it is about as fast as the 64-bit version,
+ * as it uses only a slight modification of the 64-bit loop.
+ *
+ * XXH128 is also more oriented towards 64-bit machines. It is still extremely
+ * fast for a _128-bit_ hash on 32-bit (it usually outperforms XXH64).
+ */
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ /* A doubled version of 1to3_64b with different constants. */
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
+ xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
+ xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
+ xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
+ XXH128_hash_t h128;
+ h128.low64 = XXH64_avalanche(keyed_lo);
+ h128.high64 = XXH64_avalanche(keyed_hi);
+ return h128;
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input_lo = XXH_readLE32(input);
+ xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
+ xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
+ xxh_u64 const keyed = input_64 ^ bitflip;
+
+ /* Shift len to the left to ensure it is even, this avoids even multiplies. */
+ XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
+
+ m128.high64 += (m128.low64 << 1);
+ m128.low64 ^= (m128.high64 >> 3);
+
+ m128.low64 = XXH_xorshift64(m128.low64, 35);
+ m128.low64 *= PRIME_MX2;
+ m128.low64 = XXH_xorshift64(m128.low64, 28);
+ m128.high64 = XXH3_avalanche(m128.high64);
+ return m128;
+ }
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
+ xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
+ xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 input_hi = XXH_readLE64(input + len - 8);
+ XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+ /*
+ * Put len in the middle of m128 to ensure that the length gets mixed to
+ * both the low and high bits in the 128x64 multiply below.
+ */
+ m128.low64 += (xxh_u64)(len - 1) << 54;
+ input_hi ^= bitfliph;
+ /*
+ * Add the high 32 bits of input_hi to the high 32 bits of m128, then
+ * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
+ * the high 64 bits of m128.
+ *
+ * The best approach to this operation is different on 32-bit and 64-bit.
+ */
+ if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
+ /*
+ * 32-bit optimized version, which is more readable.
+ *
+ * On 32-bit, it removes an ADC and delays a dependency between the two
+ * halves of m128.high64, but it generates an extra mask on 64-bit.
+ */
+ m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
+ } else {
+ /*
+ * 64-bit optimized (albeit more confusing) version.
+ *
+ * Uses some properties of addition and multiplication to remove the mask:
+ *
+ * Let:
+ * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
+ * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
+ * c = XXH_PRIME32_2
+ *
+ * a + (b * c)
+ * Inverse Property: x + y - x == y
+ * a + (b * (1 + c - 1))
+ * Distributive Property: x * (y + z) == (x * y) + (x * z)
+ * a + (b * 1) + (b * (c - 1))
+ * Identity Property: x * 1 == x
+ * a + b + (b * (c - 1))
+ *
+ * Substitute a, b, and c:
+ * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ *
+ * Since input_hi.hi + input_hi.lo == input_hi, we get this:
+ * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ */
+ m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
+ }
+ /* m128 ^= XXH_swap64(m128 >> 64); */
+ m128.low64 ^= XXH_swap64(m128.high64);
+
+ { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
+ XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
+ h128.high64 += m128.high64 * XXH_PRIME64_2;
+
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = XXH3_avalanche(h128.high64);
+ return h128;
+ } }
+}
+
+/*
+ * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
+ if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
+ { XXH128_hash_t h128;
+ xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
+ xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
+ h128.low64 = XXH64_avalanche(seed ^ bitflipl);
+ h128.high64 = XXH64_avalanche( seed ^ bitfliph);
+ return h128;
+ } }
+}
+
+/*
+ * A bit slower than XXH3_mix16B, but handles multiply by zero better.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
+ const xxh_u8* secret, XXH64_hash_t seed)
+{
+ acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
+ acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
+ acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
+ acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
+ return acc;
+}
+
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { XXH128_hash_t acc;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+
+#if XXH_SIZE_OPT >= 1
+ {
+ /* Smaller, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+ } while (i-- != 0);
+ }
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+ }
+ acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+ }
+ acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+ }
+ acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
+ }
+}
+
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+ { XXH128_hash_t acc;
+ unsigned i;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+ /*
+         * We set `i` to offset + 32. We do this so that the unchanged
+         * `len` can be used as the upper bound. This reaches a sweet spot
+         * where both x86 and aarch64 get simple address generation and good
+         * codegen for the loop.
+ */
+ for (i = 32; i < 160; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + i - 32,
+ seed);
+ }
+ acc.low64 = XXH3_avalanche(acc.low64);
+ acc.high64 = XXH3_avalanche(acc.high64);
+ /*
+         * NB: `i <= len` will duplicate the last 32 bytes if
+         * len % 32 is zero. This is an unfortunate necessity to keep
+ * the hash result stable.
+ */
+ for (i=160; i <= len; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+ seed);
+ }
+ /* last bytes */
+ acc = XXH128_mix32B(acc,
+ input + len - 16,
+ input + len - 32,
+ secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+ (XXH64_hash_t)0 - seed);
+
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
+ }
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)len * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + secretSize
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)len * XXH_PRIME64_2));
+ return h128;
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
+ */
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's important for performance to pass @p secretLen (when it's static)
+ * to the compiler, so that it can properly optimize the vectorized loop.
+ *
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+ if (seed64 == 0)
+ return XXH3_hashLong_128b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed64);
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const void* XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_128bits_internal(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong128_f f_hl128)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secret` conditions are not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hl128(input, len, seed64, secret, secretLen);
+}
+
+
+/* === Public XXH128 API === */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_default);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ (const xxh_u8*)secret, secretSize,
+ XXH3_hashLong_128b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_internal(input, len, seed,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_withSeed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_withSeed(input, len, seed);
+}
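+
+/*
+ * A minimal one-shot usage sketch of the 128-bit entry points above (values
+ * are illustrative only; seed 0 gives the same result as the seedless form):
+ *
+ *   const char msg[] = "hello world";
+ *   XXH128_hash_t a = XXH3_128bits(msg, sizeof(msg) - 1);
+ *   XXH128_hash_t b = XXH128(msg, sizeof(msg) - 1, 0);
+ *   // XXH128_isEqual(a, b) == 1; see the utility helpers further below.
+ */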
+
+
+/* === XXH3 128-bit streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * All initialization and update functions are identical to the 64-bit streaming variant.
+ * The only difference is the finalization routine.
+ */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ return XXH3_64bits_reset(statePtr);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSeed(statePtr, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_64bits_update(state, input, len);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + state->secretLimit + XXH_STRIPE_LEN
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+ return h128;
+ }
+ }
+ /* len <= XXH3_MIDSIZE_MAX : short code */
+ if (state->seed)
+ return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+/* 128-bit utility functions */
+
+#include <string.h> /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+ /* note : XXH128_hash_t is compact, it has no padding byte */
+ return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * @return : >0 if *h128_1 > *h128_2
+ * <0 if *h128_1 < *h128_2
+ * =0 if *h128_1 == *h128_2 */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+ XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+ XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+ int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+ /* note : bets that, in most cases, hash values are different */
+ if (hcmp) return hcmp;
+ return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
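+
+/*
+ * A minimal sketch of sorting hashes with the qsort()-compatible comparator
+ * above (array size and contents are illustrative only):
+ *
+ *   #include <stdlib.h>                          // qsort
+ *
+ *   XXH128_hash_t hashes[16];
+ *   // ... fill hashes ...
+ *   qsort(hashes, 16, sizeof(hashes[0]), XXH128_cmp);
+ */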
+
+
+/*====== Canonical representation ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) {
+ hash.high64 = XXH_swap64(hash.high64);
+ hash.low64 = XXH_swap64(hash.low64);
+ }
+ XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+ XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+ XXH128_hash_t h;
+ h.high64 = XXH_readBE64(src);
+ h.low64 = XXH_readBE64(src->digest + 8);
+ return h;
+}
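+
+/*
+ * A minimal sketch of the canonical (big-endian) round trip above, e.g. for
+ * storing a hash on disk or sending it over the wire (values illustrative):
+ *
+ *   XXH128_hash_t h = XXH128("abc", 3, 0);
+ *   XXH128_canonical_t c;
+ *   XXH128_canonicalFromHash(&c, h);             // c.digest[] is endian-independent
+ *   XXH128_hash_t back = XXH128_hashFromCanonical(&c);
+ *   // XXH128_isEqual(h, back) == 1
+ */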
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+ XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
+ XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(secretBuffer != NULL);
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+ /* production mode, assert() are disabled */
+ if (secretBuffer == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+ if (customSeedSize == 0) {
+ customSeed = XXH3_kSecret;
+ customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+ }
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(customSeed != NULL);
+#else
+ if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+ /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+ { size_t pos = 0;
+ while (pos < secretSize) {
+ size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+ memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+ pos += toCopy;
+ } }
+
+ { size_t const nbSeg16 = secretSize / 16;
+ size_t n;
+ XXH128_canonical_t scrambler;
+ XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+ for (n=0; n<nbSeg16; n++) {
+ XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+ XXH3_combine16((char*)secretBuffer + n*16, h128);
+ }
+ /* last segment */
+ XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+ }
+ return XXH_OK;
+}
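+
+/*
+ * A minimal usage sketch of the secret generator above (seed material, sizes
+ * and the data/data_len buffer are illustrative only; secretSize must be at
+ * least XXH3_SECRET_SIZE_MIN):
+ *
+ *   unsigned char secret[192];
+ *   const char material[] = "low-entropy seed material";
+ *   if (XXH3_generateSecret(secret, sizeof(secret),
+ *                           material, sizeof(material) - 1) != XXH_OK)
+ *       return;                                   // hypothetical error path
+ *   XXH64_hash_t h = XXH3_64bits_withSecret(data, data_len,
+ *                                           secret, sizeof(secret));
+ */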
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
+{
+ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ XXH3_initCustomSecret(secret, seed);
+ XXH_ASSERT(secretBuffer != NULL);
+ memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
+}
+
+
+
+/* Pop our optimization override from above */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC pop_options
+#endif
+
+#endif /* XXH_NO_LONG_LONG */
+
+#endif /* XXH_NO_XXH3 */
+
+/*!
+ * @}
+ */
+#endif /* XXH_IMPLEMENTATION */
+
+
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif