Diffstat (limited to 'grub-core/lib/zstd')
-rw-r--r--  grub-core/lib/zstd/bitstream.h        |  458
-rw-r--r--  grub-core/lib/zstd/compiler.h         |  133
-rw-r--r--  grub-core/lib/zstd/cpu.h              |  215
-rw-r--r--  grub-core/lib/zstd/debug.c            |   44
-rw-r--r--  grub-core/lib/zstd/debug.h            |  123
-rw-r--r--  grub-core/lib/zstd/entropy_common.c   |  236
-rw-r--r--  grub-core/lib/zstd/error_private.c    |   48
-rw-r--r--  grub-core/lib/zstd/error_private.h    |   76
-rw-r--r--  grub-core/lib/zstd/fse.h              |  708
-rw-r--r--  grub-core/lib/zstd/fse_decompress.c   |  309
-rw-r--r--  grub-core/lib/zstd/huf.h              |  334
-rw-r--r--  grub-core/lib/zstd/huf_decompress.c   | 1096
-rw-r--r--  grub-core/lib/zstd/mem.h              |  374
-rw-r--r--  grub-core/lib/zstd/module.c           |   21
-rw-r--r--  grub-core/lib/zstd/xxhash.c           |  876
-rw-r--r--  grub-core/lib/zstd/xxhash.h           |  305
-rw-r--r--  grub-core/lib/zstd/zstd.h             | 1516
-rw-r--r--  grub-core/lib/zstd/zstd_common.c      |   81
-rw-r--r--  grub-core/lib/zstd/zstd_decompress.c  | 3108
-rw-r--r--  grub-core/lib/zstd/zstd_errors.h      |   92
-rw-r--r--  grub-core/lib/zstd/zstd_internal.h    |  257
21 files changed, 10410 insertions, 0 deletions
diff --git a/grub-core/lib/zstd/bitstream.h b/grub-core/lib/zstd/bitstream.h
new file mode 100644
index 0000000..2f91460
--- /dev/null
+++ b/grub-core/lib/zstd/bitstream.h
@@ -0,0 +1,458 @@
+/* ******************************************************************
+ bitstream
+ Part of FSE library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time optimization is not available on all compilers,
+* these functions are defined in a header, to be inlined directly where included.
+*/
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include "mem.h" /* unaligned access routines */
+#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
+#include "error_private.h" /* error codes and messages */
+
+
+/*=========================================
+* Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+# include <immintrin.h> /* support for bextr (experimental) */
+#endif
+
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+
+/*-******************************************
+* bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
+typedef struct {
+ size_t bitContainer;
+ unsigned bitPos;
+ char* startPtr;
+ char* ptr;
+ char* endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with BIT_initCStream(), providing the size of the buffer to write into.
+* bitStream will never write outside of this buffer.
+* `dstCapacity` must be > sizeof(bitC->bitContainer), otherwise @return will be an error code.
+*
+* bits are first added to a local register.
+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Writing data into memory is an explicit operation, performed by the flushBits function.
+* Hence keep track of how many bits are potentially stored in the local register to avoid register overflow.
+* After a flushBits, a maximum of 7 bits might still be stored in the local register.
+*
+* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+* Last operation is to close the bitStream.
+* The function BIT_closeCStream() returns the final size of the CStream, in bytes.
+* If data could not fit into `dstBuffer`, it will return 0 (== not storable).
+*/
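
    For illustration, a minimal write sequence consistent with the contract above could
    look as follows (a sketch only, not part of this patch; `out` is a hypothetical
    destination buffer, and error handling is reduced to the essentials):

    BYTE out[64];
    size_t streamSize = 0;
    BIT_CStream_t bitC;
    if (!ERR_isError(BIT_initCStream(&bitC, out, sizeof(out)))) {
        BIT_addBits(&bitC, 5, 3);     /* first field : value 5, stored on 3 bits */
        BIT_addBits(&bitC, 300, 9);   /* second field : value 300, stored on 9 bits */
        BIT_flushBits(&bitC);         /* commit the local register to out[] */
        streamSize = BIT_closeCStream(&bitC);   /* adds the endMark; 0 means out[] was too small */
    }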
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct {
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+ const char* limitPtr;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+* A chunk of the bitStream is then stored into a local register.
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* You can then retrieve bitFields stored into the local register, **in reverse order**.
+* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+* Otherwise, it can be less than that, so proceed accordingly.
+* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
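
    Continuing the encoding sketch above, a matching read sequence might be (sketch only;
    note the fields come back in reverse order of writing):

    BIT_DStream_t bitD;
    if (!ERR_isError(BIT_initDStream(&bitD, out, streamSize))) {
        size_t const second = BIT_readBits(&bitD, 9);   /* 300 : written last, read first */
        size_t const first  = BIT_readBits(&bitD, 3);   /* 5 */
        BIT_reloadDStream(&bitD);                       /* refill the local register from memory */
        /* here BIT_endOfDStream(&bitD) == 1 : the stream held exactly these two fields */
        (void)first; (void)second;
    }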
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+ assert(val != 0);
+ {
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse ( &r, val );
+ return (unsigned) r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return 31 - __builtin_clz (val);
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
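
    In effect, BIT_highbit32(v) returns the position of the highest set bit, i.e.
    floor(log2(v)): for example, BIT_highbit32(1) == 0 and BIT_highbit32(0x80000000) == 31.
    An input of 0 is excluded by the assert above.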
+
+/*===== Local Constants =====*/
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+
+/*-**************************************************************
+* bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ * `dstCapacity` must be > sizeof(size_t)
+ * @return : 0 if success,
+ * otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ bitC->bitContainer = 0;
+ bitC->bitPos = 0;
+ bitC->startPtr = (char*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+ if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! BIT_addBits() :
+ * can add up to 31 bits into `bitC`.
+ * Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ * works only if `value` is _clean_,
+ * meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= value << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ * assumption : bitContainer has not overflowed
+ * unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ assert(bitC->ptr <= bitC->endPtr);
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_flushBits() :
+ * assumption : bitContainer has not overflowed
+ * safe version; checks for buffer overflow, and prevents it.
+ * note : does not signal buffer overflow;
+ * overflow will only be revealed later on, using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_closeCStream() :
+ * @return : size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+ BIT_addBitsFast(bitC, 1, 1); /* endMark */
+ BIT_flushBits(bitC);
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+ * Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ bitD->start = (const char*)srcBuffer;
+ bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ } else {
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ /* fall-through */
+
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ /* fall-through */
+
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ /* fall-through */
+
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ /* fall-through */
+
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ /* fall-through */
+
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ /* fall-through */
+
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+ }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+ return bitContainer >> start;
+}
+
+MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008 /* experimental */
+# if defined(__x86_64__)
+ if (sizeof(bitContainer)==8)
+ return _bextr_u64(bitContainer, start, nbBits);
+ else
+# endif
+ return _bextr_u32(bitContainer, start, nbBits);
+#else
+ assert(nbBits < BIT_MASK_SIZE);
+ return (bitContainer >> start) & BIT_mask[nbBits];
+#endif
+}
+
+MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ * Provides next n bits from local register.
+ * local register is not modified.
+ * On 32-bits, maxNbBits==24.
+ * On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+#if defined(__BMI__) && defined(__GNUC__) /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
+ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+#endif
+}
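
    Note the split shift in the portable path above: a single right shift by
    (regMask+1-nbBits) would be undefined C when nbBits == 0 (a shift by the full
    register width), so the expression shifts right by 1 first, then by
    ((regMask-nbBits) & regMask), keeping every individual shift count strictly
    below the register width.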
+
+/*! BIT_lookBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ assert(nbBits >= 1);
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ * Read (consume) next n bits from local register and update.
+ * Pay attention to not read more bits than are present in the local register.
+ * @return : extracted value. */
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t const value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_readBitsFast() :
+ * unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+ size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ assert(nbBits >= 1);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_reloadDStream() :
+ * Refill `bitD` from the buffer previously set in BIT_initDStream().
+ * This function is safe : it guarantees it will never read beyond the src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 (32-bits mode) or 57 (64-bits mode) bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->limitPtr) {
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ /* start < ptr < limitPtr */
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream() :
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
diff --git a/grub-core/lib/zstd/compiler.h b/grub-core/lib/zstd/compiler.h
new file mode 100644
index 0000000..07f875e
--- /dev/null
+++ b/grub-core/lib/zstd/compiler.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+/* force inlining */
+#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#if defined(__GNUC__)
+# define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+# define FORCE_INLINE_ATTR __forceinline
+#else
+# define FORCE_INLINE_ATTR
+#endif
+
+/**
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
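
    As a hypothetical illustration of the pattern (sumBytes and its `fast` flag are
    invented for this sketch, not part of zstd; assumes <stddef.h> for size_t): when
    every call site passes a compile-time constant for `fast`, forced inlining lets
    the compiler delete the dead branch and specialize each caller.

    FORCE_INLINE_TEMPLATE unsigned sumBytes(const unsigned char* p, size_t n, int fast)
    {
        unsigned total = 0;
        size_t i = 0;
        if (fast) {   /* constant at each call site => this branch folds away */
            for (; i + 4 <= n; i += 4)
                total += (unsigned)p[i] + p[i+1] + p[i+2] + p[i+3];
        }
        for (; i < n; i++) total += p[i];
        return total;
    }
    /* sumBytes(buf, len, 1) and sumBytes(buf, len, 0) each compile to a branch-free body */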
+/**
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compiler's
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+# define HINT_INLINE static INLINE_KEYWORD
+#else
+# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
+/* force no inlining */
+#ifdef _MSC_VER
+# define FORCE_NOINLINE static __declspec(noinline)
+#else
+# ifdef __GNUC__
+# define FORCE_NOINLINE static __attribute__((__noinline__))
+# else
+# define FORCE_NOINLINE static
+# endif
+#endif
+
+/* target attribute */
+#ifndef __has_attribute
+ #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
+#endif
+#if defined(__GNUC__)
+# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+#else
+# define TARGET_ATTRIBUTE(target)
+#endif
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+ #if ((defined(__clang__) && __has_attribute(__target__)) \
+ || (defined(__GNUC__) \
+ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (defined(__x86_64__) || defined(_M_X64)) \
+ && !defined(__BMI2__)
+ # define DYNAMIC_BMI2 1
+ #else
+ # define DYNAMIC_BMI2 0
+ #endif
+#endif
+
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH macro
+ * All prefetch invocations use a single default locality 2,
+ * generating instruction prefetcht1,
+ * which, according to Intel, means "load data into L2 cache".
+ * This is a good enough "middle ground" for the time being,
+ * though in theory, it would be better to specialize locality depending on data being prefetched.
+ * Tests could not determine any sensible difference based on locality value. */
+#if defined(NO_PREFETCH)
+# define PREFETCH(ptr) (void)(ptr) /* disabled */
+#else
+# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() is not defined outside of x86/x64 */
+# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+# define PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
+# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+# else
+# define PREFETCH(ptr) (void)(ptr) /* disabled */
+# endif
+#endif /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s) { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH(_ptr + _pos); \
+ } \
+}
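
    A usage sketch (names hypothetical): before copying a long match during
    decompression, the whole source region can be warmed one cache line at a time:

    PREFETCH_AREA(matchPtr, matchLength);   /* issues one PREFETCH per 64-byte line */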
+
+/* disable warnings */
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
+#endif /* ZSTD_COMPILER_H */
diff --git a/grub-core/lib/zstd/cpu.h b/grub-core/lib/zstd/cpu.h
new file mode 100644
index 0000000..eeb428a
--- /dev/null
+++ b/grub-core/lib/zstd/cpu.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/**
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include <string.h>
+
+#include "mem.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+typedef struct {
+ U32 f1c;
+ U32 f1d;
+ U32 f7b;
+ U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+ U32 f1c = 0;
+ U32 f1d = 0;
+ U32 f7b = 0;
+ U32 f7c = 0;
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+ int reg[4];
+ __cpuid((int*)reg, 0);
+ {
+ int const n = reg[0];
+ if (n >= 1) {
+ __cpuid((int*)reg, 1);
+ f1c = (U32)reg[2];
+ f1d = (U32)reg[3];
+ }
+ if (n >= 7) {
+ __cpuidex((int*)reg, 7, 0);
+ f7b = (U32)reg[1];
+ f7c = (U32)reg[2];
+ }
+ }
+#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+ /* The following block is like the normal cpuid branch below, but gcc
+ * reserves ebx for use as its PIC register, so we must specially
+ * handle the save and restore to avoid clobbering it
+ */
+ U32 n;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(n)
+ : "a"(0)
+ : "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+ : "a"(1));
+ }
+ if (n >= 7) {
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "movl %%ebx, %%eax\n\r"
+ "popl %%ebx"
+ : "=a"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+ U32 n;
+ __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+ }
+ if (n >= 7) {
+ U32 f7a;
+ __asm__("cpuid"
+ : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#endif
+ {
+ ZSTD_cpuid_t cpuid;
+ cpuid.f1c = f1c;
+ cpuid.f1d = f1d;
+ cpuid.f7b = f7b;
+ cpuid.f7c = f7c;
+ return cpuid;
+ }
+}
+
+#define X(name, r, bit) \
+ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
+ return ((cpuid.r) & (1U << bit)) != 0; \
+ }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+ C(sse3, 0)
+ C(pclmuldq, 1)
+ C(dtes64, 2)
+ C(monitor, 3)
+ C(dscpl, 4)
+ C(vmx, 5)
+ C(smx, 6)
+ C(eist, 7)
+ C(tm2, 8)
+ C(ssse3, 9)
+ C(cnxtid, 10)
+ C(fma, 12)
+ C(cx16, 13)
+ C(xtpr, 14)
+ C(pdcm, 15)
+ C(pcid, 17)
+ C(dca, 18)
+ C(sse41, 19)
+ C(sse42, 20)
+ C(x2apic, 21)
+ C(movbe, 22)
+ C(popcnt, 23)
+ C(tscdeadline, 24)
+ C(aes, 25)
+ C(xsave, 26)
+ C(osxsave, 27)
+ C(avx, 28)
+ C(f16c, 29)
+ C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+ D(fpu, 0)
+ D(vme, 1)
+ D(de, 2)
+ D(pse, 3)
+ D(tsc, 4)
+ D(msr, 5)
+ D(pae, 6)
+ D(mce, 7)
+ D(cx8, 8)
+ D(apic, 9)
+ D(sep, 11)
+ D(mtrr, 12)
+ D(pge, 13)
+ D(mca, 14)
+ D(cmov, 15)
+ D(pat, 16)
+ D(pse36, 17)
+ D(psn, 18)
+ D(clfsh, 19)
+ D(ds, 21)
+ D(acpi, 22)
+ D(mmx, 23)
+ D(fxsr, 24)
+ D(sse, 25)
+ D(sse2, 26)
+ D(ss, 27)
+ D(htt, 28)
+ D(tm, 29)
+ D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+ B(bmi1, 3)
+ B(hle, 4)
+ B(avx2, 5)
+ B(smep, 7)
+ B(bmi2, 8)
+ B(erms, 9)
+ B(invpcid, 10)
+ B(rtm, 11)
+ B(mpx, 14)
+ B(avx512f, 16)
+ B(avx512dq, 17)
+ B(rdseed, 18)
+ B(adx, 19)
+ B(smap, 20)
+ B(avx512ifma, 21)
+ B(pcommit, 22)
+ B(clflushopt, 23)
+ B(clwb, 24)
+ B(avx512pf, 26)
+ B(avx512er, 27)
+ B(avx512cd, 28)
+ B(sha, 29)
+ B(avx512bw, 30)
+ B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+ C(prefetchwt1, 0)
+ C(avx512vbmi, 1)
+#undef C
+
+#undef X
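
    A usage sketch of the generated predicates (ZSTD_localSupportsBmi2 is a
    hypothetical helper invented here, not part of this patch):

    static int ZSTD_localSupportsBmi2(void)
    {
        ZSTD_cpuid_t const cid = ZSTD_cpuid();   /* runs cpuid once, caches the four feature words */
        return ZSTD_cpuid_bmi1(cid) && ZSTD_cpuid_bmi2(cid);
    }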
+
+#endif /* ZSTD_COMMON_CPU_H */
diff --git a/grub-core/lib/zstd/debug.c b/grub-core/lib/zstd/debug.c
new file mode 100644
index 0000000..3ebdd1c
--- /dev/null
+++ b/grub-core/lib/zstd/debug.c
@@ -0,0 +1,44 @@
+/* ******************************************************************
+ debug
+ Part of FSE library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
diff --git a/grub-core/lib/zstd/debug.h b/grub-core/lib/zstd/debug.h
new file mode 100644
index 0000000..0c04ad2
--- /dev/null
+++ b/grub-core/lib/zstd/debug.h
@@ -0,0 +1,123 @@
+/* ******************************************************************
+ debug
+ Part of FSE library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They group together assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2,
+ * and is a global variable, not multi-thread protected (use with care)
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact,
+ * but can only work with compile-time constants.
+ * This variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : no debug, all run-time functions disabled
+ * 1 : no display, enables assert() only
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In which case, it's possible to selectively enable higher verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+# include <assert.h>
+#else
+# ifndef assert /* assert may be already defined, due to prior #include <assert.h> */
+# define assert(condition) ((void)0) /* disable assert (default) */
+# endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+# include <stdio.h>
+extern int g_debuglevel; /* here, this variable is only declared,
+ it actually lives in debug.c,
+ and is shared by the whole process.
+ It's typically used to enable very verbose levels
+ on selective conditions (such as position in src) */
+
+# define RAWLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ fprintf(stderr, __VA_ARGS__); \
+ } }
+# define DEBUGLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define RAWLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
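
    A usage sketch (cctx and pos are hypothetical variables): with the translation
    unit compiled at -DDEBUGLEVEL=2 or higher, a trace prints to stderr whenever its
    level is <= g_debuglevel:

    DEBUGLOG(3, "created context %p", (const void*)cctx);  /* once per object lifetime */
    RAWLOG(6, "%u ", pos);                                  /* raw output : no file prefix, no newline */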
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* DEBUG_H_12987983217 */
diff --git a/grub-core/lib/zstd/entropy_common.c b/grub-core/lib/zstd/entropy_common.c
new file mode 100644
index 0000000..b12944e
--- /dev/null
+++ b/grub-core/lib/zstd/entropy_common.c
@@ -0,0 +1,236 @@
+/*
+ Common functions of New Generation Entropy library
+ Copyright (C) 2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+/* *************************************
+* Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h" /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*=== Version ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*=== Error Management ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ int previous0 = 0;
+
+ if (hbSize < 4) {
+ /* This function only works when hbSize >= 4 */
+ char buffer[4];
+ memset(buffer, 0, sizeof(buffer));
+ memcpy(buffer, headerBuffer, hbSize);
+ { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+ buffer, sizeof(buffer));
+ if (FSE_isError(countSize)) return countSize;
+ if (countSize > hbSize) return ERROR(corruption_detected);
+ return countSize;
+ } }
+ assert(hbSize >= 4);
+
+ /* init */
+ memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ while ((remaining>1) & (charnum<=*maxSVPtr)) {
+ if (previous0) {
+ unsigned n0 = charnum;
+ while ((bitStream & 0xFFFF) == 0xFFFF) {
+ n0 += 24;
+ if (ip < iend-5) {
+ ip += 2;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 16;
+ bitCount += 16;
+ } }
+ while ((bitStream & 3) == 3) {
+ n0 += 3;
+ bitStream >>= 2;
+ bitCount += 2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+ if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+ while (charnum < n0) normalizedCounter[charnum++] = 0;
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ assert((bitCount >> 3) <= 3); /* For first condition to work */
+ ip += bitCount>>3;
+ bitCount &= 7;
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } else {
+ bitStream >>= 2;
+ } }
+ { int const max = (2*threshold-1) - remaining;
+ int count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = bitStream & (threshold-1);
+ bitCount += nbBits-1;
+ } else {
+ count = bitStream & (2*threshold-1);
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ remaining -= count < 0 ? -count : count; /* -1 means +1 */
+ normalizedCounter[charnum++] = (short)count;
+ previous0 = !count;
+ while (remaining < threshold) {
+ nbBits--;
+ threshold >>= 1;
+ }
+
+ if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+ } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+ if (remaining != 1) return ERROR(corruption_detected);
+ if (bitCount > 32) return ERROR(corruption_detected);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ return ip-istart;
+}
+
+
+/*! HUF_readStats() :
+ Read compact Huffman tree, saved by HUF_writeCTable().
+ `huffWeight` is destination buffer.
+ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+ @return : size read from `src` , or an error Code .
+ Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+ /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } }
+ else { /* header compressed with FSE (normal case) */
+ FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BIT_highbit32(rest);
+ U32 const lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
diff --git a/grub-core/lib/zstd/error_private.c b/grub-core/lib/zstd/error_private.c
new file mode 100644
index 0000000..d004ee6
--- /dev/null
+++ b/grub-core/lib/zstd/error_private.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in the binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+ static const char* const notErrorCode = "Unspecified error code";
+ switch( code )
+ {
+ case PREFIX(no_error): return "No error detected";
+ case PREFIX(GENERIC): return "Error (generic)";
+ case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+ case PREFIX(version_unsupported): return "Version not supported";
+ case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+ case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+ case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+ case PREFIX(init_missing): return "Context should be init first";
+ case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+ case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+ case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+ case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+ case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+ case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+ case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+ case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ /* following error codes are not stable and may be removed or changed in a future version */
+ case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+ case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+ case PREFIX(maxCode):
+ default: return notErrorCode;
+ }
+}
diff --git a/grub-core/lib/zstd/error_private.h b/grub-core/lib/zstd/error_private.h
new file mode 100644
index 0000000..0d2fa7e
--- /dev/null
+++ b/grub-core/lib/zstd/error_private.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t */
+#include "zstd_errors.h" /* enum list */
+
+
+/* ****************************************
+* Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+# define ERR_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+# define ERR_STATIC static __inline
+#else
+# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-****************************************
+* Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+* Error codes handling
+******************************************/
+#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+
+/*-****************************************
+* Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code); /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ return ERR_getErrorString(ERR_getErrorCode(code));
+}
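
    A usage sketch of the convention these helpers support (the failing value is a
    stand-in; real code would receive it from an API call):

    size_t const r = ERROR(corruption_detected);       /* stand-in for a real API return value */
    if (ERR_isError(r)) {
        const char* const msg = ERR_getErrorName(r);   /* -> "Corrupted block detected" */
        /* report msg, then propagate r unchanged so callers can test it too */
    }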
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
diff --git a/grub-core/lib/zstd/fse.h b/grub-core/lib/zstd/fse.h
new file mode 100644
index 0000000..a5a6b6d
--- /dev/null
+++ b/grub-core/lib/zstd/fse.h
@@ -0,0 +1,708 @@
+/* ******************************************************************
+ FSE : Finite State Entropy codec
+ Public Prototypes declaration
+ Copyright (C) 2013-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+* FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required, but it allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define FSE_PUBLIC_API
+#endif
+
+/*------ Version ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+/*! FSE_compress() :
+ Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+ 'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+ @return : size of compressed data (<= dstCapacity).
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte repeated srcSize times. Use RLE compression instead.
+ if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= dstCapacity),
+ or an error code, which can be tested using FSE_isError() .
+
+ ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+* Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+* FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+ Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+ Both parameters can be defined as '0' to mean : use default value
+ @return : size of compressed data
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte repeated srcSize times. Use RLE compression.
+ if FSE_isError(return), it's an error code.
+*/
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrences from source[] into table count[] (see hist.h)
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using an external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_optimalTableLog():
+ dynamically downsize 'tableLog' when conditions are met.
+ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+ @return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+ @return : tableLog,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_NCountWriteBound():
+ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+ Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+ Compactly save 'normalizedCounter' into 'buffer'.
+ @return : size of the compressed table,
+ or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/*! Constructor and Destructor of FSE_CTable.
+ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+ Builds `ct`, which must be already allocated, using FSE_createCTable().
+ @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+ Compress `src` using `ct` into `dst` which must be already allocated.
+ @return : size of compressed data (<= `dstCapacity`),
+ or 0 if compressed data could not fit into `dst`,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+As with FSE_count(), the convention is that 'src' is a table of bytes of size 'srcSize'.
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
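+
+/* Illustrative sketch (not part of the upstream header) assembling the
+   tutorial above into one function. It assumes 'count[]' was already filled
+   by a histogram step (e.g. FSE_count(), see hist.h; not shown) and that
+   maxSymbolValue <= 255. The helper name is hypothetical; edge cases
+   (RLE distribution, allocation failure) are trimmed.
+
+    size_t FSEtut_compressBody(void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize,
+                               const unsigned* count, unsigned maxSymbolValue)
+    {
+        short norm[256];   // needs maxSymbolValue+1 cells
+        unsigned char* const op = (unsigned char*)dst;
+        unsigned tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
+        size_t const logUsed = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
+        if (FSE_isError(logUsed)) return logUsed;
+        tableLog = (unsigned)logUsed;   // actual tableLog selected
+        {   size_t const hSize = FSE_writeNCount(op, dstCapacity, norm, maxSymbolValue, tableLog);
+            if (FSE_isError(hSize)) return hSize;
+            {   FSE_CTable* const ct = FSE_createCTable(maxSymbolValue, tableLog);
+                size_t cSize = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
+                if (!FSE_isError(cSize))
+                    cSize = FSE_compress_usingCTable(op+hSize, dstCapacity-hSize, src, srcSize, ct);
+                FSE_freeCTable(ct);
+                if (FSE_isError(cSize)) return cSize;
+                return cSize ? hSize + cSize : 0;   // 0 => could not fit into dst
+    }   }   }
+*/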
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSE_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSE_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+    Builds 'dt', which must be already allocated, using FSE_createDTable().
+    @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
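+
+/* Illustrative sketch (not part of the upstream header) assembling the
+   tutorial above. The helper name is hypothetical; edge cases are trimmed.
+
+    size_t FSEtut_decompressBody(void* dst, size_t dstCapacity,
+                                 const void* cSrc, size_t cSrcSize)
+    {
+        short norm[256];                 // worst-case sizing
+        unsigned maxSymbolValue = 255;
+        unsigned tableLog;
+        size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+        if (FSE_isError(hSize)) return hSize;
+        {   FSE_DTable* const dt = FSE_createDTable(tableLog);   // NULL check omitted
+            size_t dSize = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
+            if (!FSE_isError(dSize))
+                dSize = FSE_decompress_usingDTable(dst, dstCapacity,
+                                (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+            FSE_freeDTable(dt);
+            return dSize;   // regenerated size, or an error code
+        }
+    }
+*/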
+
+#endif /* FSE_H */
+
+#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
+
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
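+
+/* Example (sketch) : a fully static decoding table for tableLog <= 12,
+   avoiding FSE_createDTable()/malloc() entirely. 'norm', 'maxSymbolValue'
+   and 'tableLog' are assumed to come from FSE_readNCount() :
+
+    FSE_DTable dt[ FSE_DTABLE_SIZE_U32(12) ];
+    size_t const r = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);   // requires tableLog <= 12
+*/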
+
+
+/* *****************************************
+ * FSE advanced API
+ ***************************************** */
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
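+
+/* Example (sketch) : compression with an entirely stack-allocated scratch
+   buffer, sized with FSE_WKSP_SIZE_U32() for the chosen limits :
+
+    FSE_CTable wksp[ FSE_WKSP_SIZE_U32(12, 255) ];
+    size_t const c = FSE_compress_wksp(dst, dstCapacity, src, srcSize,
+                                       255, 12, wksp, sizeof(wksp));
+*/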
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `(1<<tableLog)`.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
+/**< same as FSE_decompress(), using an externally allocated `workSpace`, sized with `FSE_DTABLE_SIZE_U32(maxLog)` */
+
+typedef enum {
+ FSE_repeat_none, /**< Cannot use the previous table */
+ FSE_repeat_check, /**< Can use the previous table but it must be checked */
+    FSE_repeat_valid   /**< Can use the previous table and it is assumed to be valid */
+ } FSE_repeat;
+
+/* *****************************************
+* FSE symbol compression API
+*******************************************/
+/*!
+   This API consists of small unitary functions, which highly benefit from being inlined.
+   Hence their bodies are included in the next section.
+*/
+typedef struct {
+ ptrdiff_t value;
+ const void* stateTable;
+ const void* symbolTT;
+ unsigned stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/**<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable ct; // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream; // bitStream tracking structure
+FSE_CState_t state; // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+ size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+ FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders.
+ BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory, they just store it into a local register, for speed.
+The local register is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+ BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+ FSE_flushState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return 0 (== not compressible).
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+ size_t size = BIT_closeCStream(&bitStream);
+*/
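+
+/* Putting it together (illustrative sketch, hypothetical helper name) :
+   encoding a buffer with a single state. Note the backward iteration,
+   so that the decoder regenerates symbols in forward order.
+   Flushing after every symbol is safe but conservative; production encoders
+   flush once every few symbols.
+
+    size_t FSEtut_encode(void* dst, size_t dstCapacity,
+                         const unsigned char* src, size_t srcSize,
+                         const FSE_CTable* ct)
+    {
+        BIT_CStream_t bitC;
+        FSE_CState_t state;
+        size_t n;
+        size_t const initErr = BIT_initCStream(&bitC, dst, dstCapacity);
+        if (FSE_isError(initErr)) return initErr;   // dstCapacity too small
+        FSE_initCState(&state, ct);
+        for (n = srcSize; n > 0; n--) {             // last symbol first
+            FSE_encodeSymbol(&bitC, &state, src[n-1]);
+            BIT_flushBits(&bitC);                   // commit local register to memory
+        }
+        FSE_flushCState(&bitC, &state);
+        return BIT_closeCStream(&bitC);             // 0 => did not fit
+    }
+*/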
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct {
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/**<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream; // Stream context
+FSE_DState_t DState; // State context. Multiple ones are possible
+FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+ errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several ones) :
+ errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+ unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you previously stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bits compatibility
+ size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All above operations only read from the local register (whose size depends on size_t).
+Refueling the register from memory is manually performed by the reload method.
+    endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() result tells if there is still more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, generally meaning decompression is completed.
+BIT_DStream_overflow : DStream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+ BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+ BIT_endOfDStream(&DStream);
+Also check the states : some symbols may remain in the states if any high-probability (>50%) symbol exists.
+ FSE_endOfDState(&DState);
+*/
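+
+/* Putting it together (illustrative sketch, hypothetical helper name) :
+   decoding with a single state, mirroring the encoder sketch above.
+   Upstream decoders interleave several states and handle the tail more
+   carefully (see FSE_decompress_usingDTable_generic() in fse_decompress.c).
+
+    size_t FSEtut_decode(unsigned char* dst, size_t dstCapacity,
+                         const void* cSrc, size_t cSrcSize,
+                         const FSE_DTable* dt)
+    {
+        BIT_DStream_t bitD;
+        FSE_DState_t state;
+        size_t n = 0;
+        size_t const initResult = BIT_initDStream(&bitD, cSrc, cSrcSize);
+        if (FSE_isError(initResult)) return initResult;
+        FSE_initDState(&state, &bitD, dt);
+        while (n < dstCapacity) {
+            dst[n++] = FSE_decodeSymbol(&state, &bitD);
+            if (BIT_reloadDStream(&bitD) > BIT_DStream_completed) break;    // overflow => corruption
+            if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state)) break;  // clean end
+        }
+        return n;   // number of regenerated bytes
+    }
+*/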
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+typedef struct {
+ int deltaFindState;
+ U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+ const void* ptr = ct;
+ const U16* u16ptr = (const U16*) ptr;
+ const U32 tableLog = MEM_read16(ptr);
+ statePtr->value = (ptrdiff_t)1<<tableLog;
+ statePtr->stateTable = u16ptr+2;
+ statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
+ statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+* uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+ FSE_initCState(statePtr, ct);
+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* stateTable = (const U16*)(statePtr->stateTable);
+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+ }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
+{
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* const stateTable = (const U16*)(statePtr->stateTable);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+ BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+ BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_flushBits(bitC);
+}
+
+
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional costs are rounded up (e.g. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+ U32 const threshold = (minNbBits+1) << 16;
+ assert(tableLog < 16);
+ assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */
+ { U32 const tableSize = 1 << tableLog;
+ U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+ U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */
+ U32 const bitMultiplier = 1 << accuracyLog;
+ assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+ assert(normalizedDeltaFromThreshold <= bitMultiplier);
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+ }
+}
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+# define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+# define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+# define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/grub-core/lib/zstd/fse_decompress.c b/grub-core/lib/zstd/fse_decompress.c
new file mode 100644
index 0000000..2227b84
--- /dev/null
+++ b/grub-core/lib/zstd/fse_decompress.c
@@ -0,0 +1,309 @@
+/* ******************************************************************
+ FSE : Finite State Entropy decoder
+ Copyright (C) 2013-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memcpy, memset */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+/* check and forward error code */
+#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+  These templates are designed to be included,
+  to generate type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ return (FSE_DTable*)calloc( FSE_DTABLE_SIZE_U32(tableLog), sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+ free(dt);
+}
+
+size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bit aligned on 32-bit boundaries */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ { U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
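+        /* note : 'step' is odd while tableSize is a power of 2, hence coprime : 'position' visits every cell exactly once */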
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+
+ /* Init */
+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
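+    /* note : single '&' (not '&&') evaluates both conditions without a branch */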
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state1);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state2);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ short counting[FSE_MAX_SYMBOL_VALUE+1];
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+
+ /* normal FSE decoding mode */
+ size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+
+ CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
+
+ return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */
+}
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
+{
+ DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
+ return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+}
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/grub-core/lib/zstd/huf.h b/grub-core/lib/zstd/huf.h
new file mode 100644
index 0000000..de94641
--- /dev/null
+++ b/grub-core/lib/zstd/huf.h
@@ -0,0 +1,334 @@
+/* ******************************************************************
+ huff0 huffman codec,
+ part of Finite State Entropy library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include <stddef.h> /* size_t */
+
+
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ * HUF symbols remain "private" (internal symbols for library only).
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+# define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* *** simple functions *** */
+/* ========================== */
+
+/** HUF_compress() :
+ * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/** HUF_decompress() :
+ * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ * into already allocated buffer 'dst', of minimum size 'originalSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ * Note : in contrast with FSE, HUF_decompress can regenerate
+ * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ * because it knows size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ * or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize);
+
+
+/* *** Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
+
+/* Error Management */
+HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
+
+
+/* *** Advanced function *** */
+
+/** HUF_compress2() :
+ * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/** HUF_compress4X_wksp() :
+ * Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workSpace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE (6 << 10)
+#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize);
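+
+/* Usage sketch (illustrative) : a stack-allocated workSpace avoids any
+   implicit allocation; 'unsigned' provides the required 4-byte alignment :
+
+    unsigned wksp[HUF_WORKSPACE_SIZE / sizeof(unsigned)];
+    size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+                                             255, 11, wksp, sizeof(wksp));
+    // check cSize with HUF_isError(); 0 means not compressible
+*/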
+
+#endif /* HUF_H_298734234 */
+
+/* ******************************************************************
+ * WARNING !!
+ * The following section contains advanced and experimental definitions
+ * which shall never be used in the context of a dynamic library,
+ * because they are not guaranteed to remain stable in the future.
+ * Only consider them in association with static linking.
+ * *****************************************************************/
+#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h" /* U32 */
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
+#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+# error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+ U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
+ void* name##hv = &(name##hb); \
+ HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
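+
+/* Usage sketch (illustrative) : a fully static single-symbol decoder,
+   combining the macro above with the X1 functions declared further below :
+
+    HUF_CREATE_STATIC_DTABLEX1(dtable, HUF_TABLELOG_MAX);
+    size_t const hSize = HUF_readDTableX1(dtable, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    return HUF_decompress1X1_usingDTable(dst, dstSize,
+                (const char*)cSrc + hSize, cSrcSize - hSize, dtable);
+*/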
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
+
+
+/* ****************************************
+ * HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ * 1. count symbol occurrences from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
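+
+/* Illustrative sketch of steps 2-5 above (HUF_isError() checks omitted).
+   'count[]' is a caller-supplied histogram from step 1 (not shown) :
+
+    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
+    unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, 255);
+    size_t const maxBits = HUF_buildCTable(ctable, count, 255, huffLog);
+    size_t const hSize = HUF_writeCTable(dst, dstCapacity, ctable, 255, (unsigned)maxBits);
+    size_t const cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize,
+                                                    src, srcSize, ctable);
+    // total output = hSize + cSize ; cSize == 0 would mean "did not fit"
+*/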
+
+typedef enum {
+ HUF_repeat_none, /**< Cannot use the previous table */
+ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+/** HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat is set, the old table will always be used if valid. */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+/** HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+ const U32* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src`, or an error code.
+ * Note : needed by HUF_readCTable() and HUF_readDTableX?(). */
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+/** HUF_readCTable() :
+ * Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+/** HUF_getNbBits() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ * Note 1 : is not inlined, as HUF_CElt definition is private
+ * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+
+/*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build the Huffman table from its saved representation, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
+
+/** HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
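+
+/* Usage sketch (illustrative) :
+
+    U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);
+    size_t const r = algo ? HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize)
+                          : HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
+*/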
+
+/**
+ * The minimum workspace size for the `workSpace` used in
+ * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+
+
+/* ====================== */
+/* single stream variants */
+/* ====================== */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+/** HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat is set, the old table will always be used if valid. */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+
+#endif /* HUF_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/grub-core/lib/zstd/huf_decompress.c b/grub-core/lib/zstd/huf_decompress.c
new file mode 100644
index 0000000..83ecaff
--- /dev/null
+++ b/grub-core/lib/zstd/huf_decompress.c
@@ -0,0 +1,1096 @@
+/* ******************************************************************
+ huff0 huffman decoder,
+ part of Finite State Entropy library
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+/* **************************************************************
+* Dependencies
+****************************************************************/
+#include <string.h> /* memcpy, memset */
+#include "compiler.h"
+#include "bitstream.h" /* BIT_* */
+#include "fse.h" /* to compress headers */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
+
+
+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+
+/*-***************************/
+/* generic DTableDesc */
+/*-***************************/
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+ DTableDesc dtd;
+ memcpy(&dtd, table, sizeof(dtd));
+ return dtd;
+}
+
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */
+
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+{
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ size_t iSize;
+ void* const dtPtr = DTable + 1;
+ HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
+
+ U32* rankVal;
+ BYTE* huffWeight;
+ size_t spaceUsed32 = 0;
+
+ rankVal = (U32 *)workSpace + spaceUsed32;
+ spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
+ huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
+ spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+ if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+ /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* Table header */
+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ memcpy(DTable, &dtd, sizeof(dtd));
+ }
+
+ /* Calculate starting value for each rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<tableLog+1; n++) {
+ U32 const current = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = current;
+ } }
+
+ /* fill DTable */
+ { U32 n;
+ for (n=0; n<nbSymbols; n++) {
+ U32 const w = huffWeight[n];
+ U32 const length = (1 << w) >> 1;
+ U32 u;
+ HUF_DEltX1 D;
+ D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+ for (u = rankVal[w]; u < rankVal[w] + length; u++)
+ dt[u] = D;
+ rankVal[w] += length;
+ } }
+
+ return iSize;
+}
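+
+/* Worked illustration of the fill above (values chosen here for exposition, not
+ * taken from a real stream) : with tableLog==3 and three symbols of weights {2,2,3},
+ * the loop produces an 8-cell table. The two weight-2 symbols get
+ * nbBits = 3+1-2 = 2 and (1<<2)>>1 = 2 cells each (cells 0-1 and 2-3), and the
+ * weight-3 symbol gets nbBits = 1 and 4 cells (4-7). Decoding then peeks tableLog
+ * bits, indexes the table, emits `byte`, and consumes only `nbBits` of them. */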
+
+size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_readDTableX1_wksp(DTable, src, srcSize,
+ workSpace, sizeof(workSpace));
+}
+
+FORCE_INLINE_TEMPLATE BYTE
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ BYTE const c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
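+
+/* Why the three tiers above are safe (a sketch of the arithmetic, assuming the
+ * reload invariants from bitstream.h) : after a successful reload, at most 7 bits
+ * of the size_t bit container are consumed, leaving >= 57 usable bits on 64-bit
+ * targets and >= 25 on 32-bit ones. Since each symbol peeks at most
+ * HUF_TABLELOG_MAX (12) bits, 64-bit mode can decode 4 symbols (48 bits) between
+ * reloads, while 32-bit mode keeps only the _1 and _0 steps (24 bits). */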
+
+HINT_INLINE size_t
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ }
+
+ /* [0-3] symbols remaining */
+ if (MEM_32bits())
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const void* dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+ BIT_DStream_t bitD;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
+
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
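+ /* Layout sketch of a 4-streams block, as consumed below : a 6-byte jump table
+ * (3 little-endian U16 giving the compressed sizes of streams 1-3; the size of
+ * stream 4 is implied), followed by the 4 compressed streams. Each stream
+ * regenerates one segment of (dstSize+3)/4 bytes, the last possibly shorter. */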
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal = BIT_DStream_unfinished;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
+ BIT_reloadDStream(&bitD1);
+ BIT_reloadDStream(&bitD2);
+ BIT_reloadDStream(&bitD3);
+ BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ /* note : these checks should not be necessary : the op# pointers advance in lock step, and op4 is bounded by the main loop.
+ * But curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when at least one test is present */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 is already verified within the main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+#if DYNAMIC_BMI2
+
+#define HUF_DGEN(fn) \
+ \
+ static size_t fn##_default( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ if (bmi2) { \
+ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#else
+
+#define HUF_DGEN(fn) \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ (void)bmi2; \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#endif
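+
+/* Illustration of what HUF_DGEN(fn) expands to when DYNAMIC_BMI2 is set (an
+ * expansion sketch, not extra code) : a portable fn_default, an fn_bmi2 compiled
+ * with TARGET_ATTRIBUTE("bmi2"), and a dispatcher fn() selecting between them from
+ * the caller-provided `bmi2` flag, typically derived from run-time CPUID detection.
+ * Without DYNAMIC_BMI2, fn() is a single wrapper that ignores the flag. */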
+
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
+
+
+
+size_t HUF_decompress1X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+size_t HUF_decompress4X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+}
+
+
+size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
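+
+/* Layout note for HUF_DEltX2 : `sequence` holds 1 or 2 decoded symbols packed
+ * little-endian, `length` says how many of its bytes are valid output (1 or 2),
+ * and `nbBits` is the total number of bits consumed for that cell. The decoder
+ * below always memcpy()s 2 bytes but only advances the output pointer by `length`. */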
+
+
+/* HUF_fillDTableX2Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ HUF_DEltX2 DElt;
+ U32 rankVal[HUF_TABLELOG_MAX + 1];
+
+ /* get pre-calculated rankVal */
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill skipped values */
+ if (minWeight>1) {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ } }
+}
+
+
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32 rankVal[HUF_TABLELOG_MAX + 1];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol);
+ } else {
+ HUF_DEltX2 DElt;
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ { U32 const end = start + length;
+ U32 u;
+ for (u = start; u < end; u++) DTable[u] = DElt;
+ } }
+ rankVal[weight] += length;
+ }
+}
+
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog;
+ size_t iSize;
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+ U32 *rankStart;
+
+ rankValCol_t* rankVal;
+ U32* rankStats;
+ U32* rankStart0;
+ sortedSymbol_t* sortedSymbol;
+ BYTE* weightList;
+ size_t spaceUsed32 = 0;
+
+ rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
+ spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
+ rankStats = (U32 *)workSpace + spaceUsed32;
+ spaceUsed32 += HUF_TABLELOG_MAX + 1;
+ rankStart0 = (U32 *)workSpace + spaceUsed32;
+ spaceUsed32 += HUF_TABLELOG_MAX + 2;
+ sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
+ spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
+ weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
+ spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+ if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ rankStart = rankStart0 + 1;
+ memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
+
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
+ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankStart;
+ nextRankStart += rankStats[w];
+ rankStart[w] = current;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of the sorted list */
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = weightList[s];
+ U32 const r = rankStart[w]++;
+ sortedSymbol[r].symbol = (BYTE)s;
+ sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = rankVal[0];
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 current = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
+ rankVal0[w] = current;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUF_fillDTableX2(dt, maxTableLog,
+ sortedSymbol, sizeOfSort,
+ rankStart0, rankVal, maxW,
+ tableLog+1);
+
+ dtd.tableLog = (BYTE)maxTableLog;
+ dtd.tableType = 1;
+ memcpy(DTable, &dtd, sizeof(dtd));
+ return iSize;
+}
+
+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_readDTableX2_wksp(DTable, src, srcSize,
+ workSpace, sizeof(workSpace));
+}
+
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+ else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+ } }
+ return 1;
+}
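+
+/* note on the corner case above : when the looked-up cell encodes a pair but only
+ * one output byte remains, only the first byte is emitted; clamping bitsConsumed to
+ * the container size keeps BIT_endOfDStream() accurate for the final check. */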
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+ const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to end : up to 2 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BIT_DStream_t bitD;
+
+ /* Init */
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+ }
+
+ /* check */
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1;
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+ return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+
+/** HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+ assert(dstSize > 0);
+ assert(dstSize <= 128*1024);
+ /* decoder timing evaluation */
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */
+ return DTime1 < DTime0;
+} }
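+
+/* Worked example of the heuristic above (numbers straight from algoTime) :
+ * for dstSize = 64 KB and cSrcSize = 32 KB, Q = 32768*16/65536 = 8 and D256 = 256.
+ * DTime0 = 926 + 128*256 = 33694 ; DTime1 = 1613 + 75*256 = 20813, plus
+ * 20813>>3 = 2601, giving 23414. Since 23414 < 33694, the double-symbol decoder
+ * (HUF_decompress4X2) is selected. */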
+
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
+
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+ }
+}
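+
+/* Minimal caller sketch (error handling elided; dstSize must be the exact
+ * regenerated size, transmitted by some out-of-band means, and BLOCKSIZE is a
+ * placeholder for a caller-chosen buffer size) :
+ * BYTE out[BLOCKSIZE];
+ * size_t const r = HUF_decompress(out, dstSize, cSrc, cSrcSize);
+ * if (HUF_isError(r)) return r;
+ */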
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+}
+
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+ size_t dstSize, const void* cSrc,
+ size_t cSrcSize, void* workSpace,
+ size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+ }
+}
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+ }
+}
+
+size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+ workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+ HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ }
+}
diff --git a/grub-core/lib/zstd/mem.h b/grub-core/lib/zstd/mem.h
new file mode 100644
index 0000000..2051bca
--- /dev/null
+++ b/grub-core/lib/zstd/mem.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include <stddef.h> /* size_t, ptrdiff_t */
+#include <string.h> /* memcpy */
+
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#if defined(_MSC_VER) /* Visual Studio */
+# include <stdlib.h> /* _byteswap_ulong */
+# include <intrin.h> /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+# define MEM_STATIC static __inline __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+# define MEM_STATIC static __inline
+#else
+# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+/* code only tested on 32- and 64-bit systems */
+#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
+MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
+#else
+# include <limits.h>
+#if CHAR_BIT != 8
+# error "this implementation requires char to be exactly 8-bit type"
+#endif
+ typedef unsigned char BYTE;
+#if USHRT_MAX != 65535
+# error "this implementation requires short to be exactly 16-bit type"
+#endif
+ typedef unsigned short U16;
+ typedef signed short S16;
+#if UINT_MAX != 4294967295
+# error "this implementation requires int to be exactly 32-bit type"
+#endif
+ typedef unsigned int U32;
+ typedef signed int S32;
+/* note : C90 defines no limits for the long long type.
+ * Limits exist in C99; in that case, however, <stdint.h> is preferred */
+ typedef unsigned long long U64;
+ typedef signed long long S64;
+#endif
+
+
+/*-**************************************************************
+* Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
+ * It can generate buggy code on targets depending on alignment.
+ * In some circumstances, it's the only known way to get the most performance (e.g., GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
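+/* For example, a build system could force method 1 with `-DMEM_FORCE_MEMORY_ACCESS=1`
+ * on the compiler command line (illustrative flag usage; method 2 should only be
+ * forced on targets known to support unaligned loads in hardware). */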
+#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define MEM_FORCE_MEMORY_ACCESS 2
+# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+# define MEM_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Use only when there is no other way to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __packed attributes / pack pragmas are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
+ __pragma( pack(push, 1) )
+ typedef struct { U16 v; } unalign16;
+ typedef struct { U32 v; } unalign32;
+ typedef struct { U64 v; } unalign64;
+ typedef struct { size_t v; } unalignArch;
+ __pragma( pack(pop) )
+#else
+ typedef struct { U16 v; } __attribute__((packed)) unalign16;
+ typedef struct { U32 v; } __attribute__((packed)) unalign32;
+ typedef struct { U64 v; } __attribute__((packed)) unalign64;
+ typedef struct { size_t v; } __attribute__((packed)) unalignArch;
+#endif
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+
+#else
+
+/* default method, safe and standard.
+ can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+ U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+ U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+ U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC size_t MEM_readST(const void* memPtr)
+{
+ size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write32(void* memPtr, U32 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write64(void* memPtr, U64 value)
+{
+ memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_ulong(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ return __builtin_bswap32(in);
+#else
+ return ((in << 24) & 0xff000000 ) |
+ ((in << 8) & 0x00ff0000 ) |
+ ((in >> 8) & 0x0000ff00 ) |
+ ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_uint64(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ return __builtin_bswap64(in);
+#else
+ return ((in << 56) & 0xff00000000000000ULL) |
+ ((in << 40) & 0x00ff000000000000ULL) |
+ ((in << 24) & 0x0000ff0000000000ULL) |
+ ((in << 8) & 0x000000ff00000000ULL) |
+ ((in >> 8) & 0x00000000ff000000ULL) |
+ ((in >> 24) & 0x0000000000ff0000ULL) |
+ ((in >> 40) & 0x000000000000ff00ULL) |
+ ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_swap32((U32)in);
+ else
+ return (size_t)MEM_swap64((U64)in);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read16(memPtr);
+ else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)(p[0] + (p[1]<<8));
+ }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+ if (MEM_isLittleEndian()) {
+ MEM_write16(memPtr, val);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE)val;
+ p[1] = (BYTE)(val>>8);
+ }
+}
+
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+ return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
+{
+ MEM_writeLE16(memPtr, (U16)val);
+ ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read32(memPtr);
+ else
+ return MEM_swap32(MEM_read32(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
+{
+ if (MEM_isLittleEndian())
+ MEM_write32(memPtr, val32);
+ else
+ MEM_write32(memPtr, MEM_swap32(val32));
+}
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_read64(memPtr);
+ else
+ return MEM_swap64(MEM_read64(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
+{
+ if (MEM_isLittleEndian())
+ MEM_write64(memPtr, val64);
+ else
+ MEM_write64(memPtr, MEM_swap64(val64));
+}
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeLE32(memPtr, (U32)val);
+ else
+ MEM_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_swap32(MEM_read32(memPtr));
+ else
+ return MEM_read32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
+{
+ if (MEM_isLittleEndian())
+ MEM_write32(memPtr, MEM_swap32(val32));
+ else
+ MEM_write32(memPtr, val32);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void* memPtr)
+{
+ if (MEM_isLittleEndian())
+ return MEM_swap64(MEM_read64(memPtr));
+ else
+ return MEM_read64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
+{
+ if (MEM_isLittleEndian())
+ MEM_write64(memPtr, MEM_swap64(val64));
+ else
+ MEM_write64(memPtr, val64);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void* memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readBE32(memPtr);
+ else
+ return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeBE32(memPtr, (U32)val);
+ else
+ MEM_writeBE64(memPtr, (U64)val);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
diff --git a/grub-core/lib/zstd/module.c b/grub-core/lib/zstd/module.c
new file mode 100644
index 0000000..e64d068
--- /dev/null
+++ b/grub-core/lib/zstd/module.c
@@ -0,0 +1,21 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2018 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <grub/dl.h>
+
+GRUB_MOD_LICENSE ("GPLv3");
diff --git a/grub-core/lib/zstd/xxhash.c b/grub-core/lib/zstd/xxhash.c
new file mode 100644
index 0000000..532b816
--- /dev/null
+++ b/grub-core/lib/zstd/xxhash.c
@@ -0,0 +1,876 @@
+/*
+* xxHash - Fast Hash algorithm
+* Copyright (C) 2012-2016, Yann Collet
+*
+* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+* You can contact the author at :
+* - xxHash homepage: http://www.xxhash.com
+* - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler, but it violates the C standard.
+ * It can generate buggy code on targets which do not support unaligned memory accesses.
+ * But in some circumstances, it's the only known way to get the most performance (e.g., GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define XXH_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is NULL, xxHash's default behavior is to trigger a memory access error, since dereferencing it is invalid.
+ * When this option is enabled, xxHash's output for a NULL input pointer is the same as for a zero-length input.
+ * By default, this option is disabled. To enable it, uncomment the define below :
+ */
+/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
+ * Results are therefore identical for little-endian and big-endian CPUs.
+ * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed on big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
+# define XXH_FORCE_NATIVE_FORMAT 0
+#endif
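+/* For example, building with `-DXXH_FORCE_NATIVE_FORMAT=1` (illustrative flag usage)
+ * speeds up big-endian targets, but their hash values will then differ from
+ * little-endian ones, so it is only safe when hashes never leave the machine. */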
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+/* Modify the local functions below should you wish to use some other memory routines */
+/* for malloc(), free() */
+#include <stdlib.h>
+#include <stddef.h> /* size_t */
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void XXH_free (void* p) { free(p); }
+/* for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+#endif
+#include "xxhash.h"
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#if defined(__GNUC__)
+# define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+# define FORCE_INLINE_ATTR __forceinline
+#else
+# define FORCE_INLINE_ATTR
+#endif
+
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* *************************************
+* Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+# else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+# endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* packed-union accesses are safer than plain casts, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
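+/* (Worth noting : modern compilers typically lower this fixed-size memcpy()
+ * into a single unaligned load, so no function call is emitted.) */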
+
+static U32 XXH_read32(const void* memPtr)
+{
+ U32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+static U64 XXH_read64(const void* memPtr)
+{
+ U64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
+#if defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+# define XXH_swap64 _byteswap_uint64
+#elif GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+# define XXH_swap64 __builtin_bswap64
+#else
+static U32 XXH_swap32 (U32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+static U64 XXH_swap64 (U64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
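+/* Worked example : XXH_swap32(0x12345678) == 0x78563412 ;
+ * each byte moves to the mirrored position. */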
+
+
+/* *************************************
+* Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+ static const int g_one = 1;
+# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
+#endif
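+/* Rationale : the integer 1 is stored as bytes {0x01,0x00,0x00,0x00} on a
+ * little-endian CPU, so reading its first byte yields 1 ; on a big-endian
+ * CPU it yields 0. */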
+
+
+/* ***************************
+* Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/* *************************************
+* Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/* *************************************
+* Constants
+***************************************/
+static const U32 PRIME32_1 = 2654435761U;
+static const U32 PRIME32_2 = 2246822519U;
+static const U32 PRIME32_3 = 3266489917U;
+static const U32 PRIME32_4 = 668265263U;
+static const U32 PRIME32_5 = 374761393U;
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 = 1609587929392839161ULL;
+static const U64 PRIME64_4 = 9650029242287828579ULL;
+static const U64 PRIME64_5 = 2870177450012600261ULL;
+
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* **************************
+* Utils
+****************************/
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+
+/* ***************************
+* Simple Hash Functions
+*****************************/
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+ seed += input * PRIME32_2;
+ seed = XXH_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
+}
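+/* Each round folds 32 bits of input into one accumulator :
+ * multiply-accumulate, rotate to bring high bits down, then re-multiply.
+ * Four such accumulators (v1..v4) run in parallel over 16-byte stripes
+ * in XXH32_endian_align() below. */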
+
+FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* bEnd = p + len;
+ U32 h32;
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)16;
+ }
+#endif
+
+ if (len>=16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+ v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+ v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+ v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+ } while (p<=limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (U32) len;
+
+ while (p+4<=bEnd) {
+ h32 += XXH_get32bits(p) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_CREATESTATE_STATIC(state);
+ XXH32_reset(state, seed);
+ XXH32_update(state, input, len);
+ return XXH32_digest(state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
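+/* Illustrative one-shot usage (hypothetical caller code, not part of xxHash) :
+ *
+ *   const char msg[] = "hello";
+ *   unsigned const h = XXH32(msg, sizeof(msg)-1, 0);   // seed = 0
+ */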
+
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+ U64 h64;
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)32;
+ }
+#endif
+
+ if (len>=32) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = seed + PRIME64_1 + PRIME64_2;
+ U64 v2 = seed + PRIME64_2;
+ U64 v3 = seed + 0;
+ U64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+ } while (p<=limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (U64) len;
+
+ while (p+8<=bEnd) {
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+ h64 ^= k1;
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ p+=8;
+ }
+
+ if (p+4<=bEnd) {
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_CREATESTATE_STATIC(state);
+ XXH64_reset(state, seed);
+ XXH64_update(state, input, len);
+ return XXH64_digest(state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+/* **************************************************
+* Advanced Hash Functions
+****************************************************/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+
+/*** Hash feed ***/
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+ XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ memcpy(statePtr, &state, sizeof(state));
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+ XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ memcpy(statePtr, &state, sizeof(state));
+ return XXH_OK;
+}
+
+
+FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (input==NULL) return XXH_ERROR;
+#endif
+
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (unsigned)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const U32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+ const BYTE * p = (const BYTE*)state->mem32;
+ const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
+ U32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ while (p+4<=bEnd) {
+ h32 += XXH_readLE32(p, endian) * PRIME32_3;
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h32 += (*p) * PRIME32_5;
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
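+/* Illustrative streaming usage (hypothetical caller code, not part of xxHash) :
+ *
+ *   XXH32_state_t* const st = XXH32_createState();
+ *   XXH32_reset(st, 0);                     // seed = 0
+ *   XXH32_update(st, chunk1, len1);
+ *   XXH32_update(st, chunk2, len2);
+ *   unsigned const h = XXH32_digest(st);    // same result as hashing the
+ *   XXH32_freeState(st);                    // concatenated input in one call
+ */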
+
+
+
+/* **** XXH64 **** */
+
+FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+ if (input==NULL) return XXH_ERROR;
+#endif
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ state->memsize += (U32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+ p += 32-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = state->v1;
+ U64 v2 = state->v2;
+ U64 v3 = state->v3;
+ U64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+ const BYTE * p = (const BYTE*)state->mem64;
+ const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
+ U64 h64;
+
+ if (state->total_len >= 32) {
+ U64 const v1 = state->v1;
+ U64 const v2 = state->v2;
+ U64 const v3 = state->v3;
+ U64 const v4 = state->v4;
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 + PRIME64_5;
+ }
+
+ h64 += (U64) state->total_len;
+
+ while (p+8<=bEnd) {
+ U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
+ h64 ^= k1;
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ p+=8;
+ }
+
+ if (p+4<=bEnd) {
+ h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p+=4;
+ }
+
+ while (p<bEnd) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/* **************************
+* Canonical representation
+****************************/
+
+/*! Default XXH result types are basic unsigned 32-bit and 64-bit integers.
+*   The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
+*   These functions allow transformation of a hash result into and from its canonical format.
+*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
diff --git a/grub-core/lib/zstd/xxhash.h b/grub-core/lib/zstd/xxhash.h
new file mode 100644
index 0000000..9bad1f5
--- /dev/null
+++ b/grub-core/lib/zstd/xxhash.h
@@ -0,0 +1,305 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MurmurHash 3a    2.7 GB/s     10       Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name Speed on 64 bits Speed on 32 bits
+XXH64 13.8 GB/s 1.9 GB/s
+XXH32 6.8 GB/s 6.0 GB/s
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+
+/* ****************************
+* Definitions
+******************************/
+#include <stddef.h> /* size_t */
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+/* ****************************
+* API modifier
+******************************/
+/** XXH_PRIVATE_API
+* This is useful if you want to include xxhash functions in `static` mode
+*   in order to inline them, and remove their symbols from the public list.
+* Methodology :
+* #define XXH_PRIVATE_API
+* #include "xxhash.h"
+* `xxhash.c` is automatically included.
+*   It is then no longer useful to compile and link it as a separate module.
+*/
+#ifdef XXH_PRIVATE_API
+# ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+# endif
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */
+# endif
+#else
+# define XXH_PUBLIC_API /* do nothing */
+#endif /* XXH_PRIVATE_API */
+
+/*!XXH_NAMESPACE, aka Namespace Emulation :
+
+If you want to include _and expose_ xxHash functions from within your own library,
+but also want to avoid symbol collisions with another library which also includes xxHash,
+you can use XXH_NAMESPACE to automatically prefix every public symbol of the xxhash library
+with the value of XXH_NAMESPACE (so do not leave it empty, and avoid purely numeric values).
+
+Note that no change is required within the calling program as long as it includes `xxhash.h` :
+regular symbol names are automatically translated by this header.
+*/
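+/* Example (hypothetical build flag) : compiling with `-DXXH_NAMESPACE=MYLIB_`
+ * makes a call to XXH32() in client code resolve to the exported symbol
+ * MYLIB_XXH32, while the source keeps using the regular names. */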
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 6
+#define XXH_VERSION_RELEASE 2
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+* Simple Hash Functions
+******************************/
+typedef unsigned int XXH32_hash_t;
+typedef unsigned long long XXH64_hash_t;
+
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
+
+/*!
+XXH32() :
+    Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
+    The memory between input & input+length must be valid (allocated and read-accessible).
+    "seed" can be used to alter the result predictably.
+    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+XXH64() :
+    Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
+    "seed" can be used to alter the result predictably.
+    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark above).
+*/
+
+
+/* ****************************
+* Streaming Hash Functions
+******************************/
+typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+
+/*! State allocation, compatible with dynamic libraries */
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+
+
+/* hash streaming */
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
+
+/*
+These functions generate the xxHash of an input provided in multiple segments.
+Note that, for small input, they are slower than single-call functions, due to state management.
+For small input, prefer `XXH32()` and `XXH64()` .
+
+XXH state must first be allocated, using XXH*_createState() .
+
+Start a new hash by initializing state with a seed, using XXH*_reset().
+
+Then, feed the hash state by calling XXH*_update() as many times as necessary.
+Obviously, the input must be allocated and read-accessible.
+The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+
+Finally, a hash value can be produced anytime, by using XXH*_digest().
+This function returns the 32-bit or 64-bit hash value as an unsigned int or unsigned long long, respectively.
+
+It's still possible to continue inserting input into the hash state after a digest,
+and generate some new hashes later on, by calling XXH*_digest() again.
+
+When done, free XXH state space if it was allocated dynamically.
+*/
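+/* Illustrative sequence (hypothetical caller code, not part of this header) :
+ *
+ *   XXH64_state_t* const st = XXH64_createState();
+ *   XXH64_reset(st, 0);                              // seed = 0
+ *   XXH64_update(st, chunk1, len1);
+ *   XXH64_update(st, chunk2, len2);
+ *   unsigned long long const h = XXH64_digest(st);   // updates may continue afterwards
+ *   XXH64_freeState(st);
+ */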
+
+
+/* **************************
+* Utils
+****************************/
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */
+# define restrict /* disable restrict */
+#endif
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
+
+
+/* **************************
+* Canonical representation
+****************************/
+/* The default result types for XXH functions are primitive unsigned 32-bit and 64-bit integers.
+*  The canonical representation uses the human-readable write convention, aka big-endian (large digits first).
+* These functions allow transformation of hash result into and from its canonical format.
+* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
+*/
+typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
+typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+/* ================================================================================================
+ This section contains definitions which are not guaranteed to remain stable.
+ They may change in future versions, becoming incompatible with a different version of the library.
+ They shall only be used with static linking.
+ Never use these definitions in association with dynamic linking !
+=================================================================================================== */
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
+#define XXH_STATIC_H_3543687687345
+
+/* These definitions are only meant to allow allocation of XXH state
+ statically, on stack, or in a struct for example.
+ Do not use members directly. */
+
+ struct XXH32_state_s {
+ unsigned total_len_32;
+ unsigned large_len;
+ unsigned v1;
+ unsigned v2;
+ unsigned v3;
+ unsigned v4;
+ unsigned mem32[4]; /* buffer defined as U32 for alignment */
+ unsigned memsize;
+ unsigned reserved; /* never read nor write, will be removed in a future version */
+ }; /* typedef'd to XXH32_state_t */
+
+ struct XXH64_state_s {
+ unsigned long long total_len;
+ unsigned long long v1;
+ unsigned long long v2;
+ unsigned long long v3;
+ unsigned long long v4;
+ unsigned long long mem64[4]; /* buffer defined as U64 for alignment */
+ unsigned memsize;
+ unsigned reserved[2]; /* never read nor write, will be removed in a future version */
+ }; /* typedef'd to XXH64_state_t */
+
+
+# ifdef XXH_PRIVATE_API
+# include "xxhash.c" /* include xxhash functions as `static`, for inlining */
+# endif
+
+#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/grub-core/lib/zstd/zstd.h b/grub-core/lib/zstd/zstd.h
new file mode 100644
index 0000000..7b6964b
--- /dev/null
+++ b/grub-core/lib/zstd/zstd.h
@@ -0,0 +1,1516 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ====== Dependency ======*/
+#include <stddef.h> /* size_t */
+
+
+/* ===== ZSTDLIB_API : control library symbols visibility ===== */
+#ifndef ZSTDLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZSTDLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define ZSTDLIB_API ZSTDLIB_VISIBILITY
+#endif
+
+
+/*******************************************************************************
+ Introduction
+
+ zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+ real-time compression scenarios at zlib-level and better compression ratios.
+ The zstd compression library provides in-memory compression and decompression
+ functions.
+
+ The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+ which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+ caution, as they require more memory. The library also offers negative
+ compression levels, which extend the range of speed vs. ratio preferences.
+ The lower the level, the faster the speed (at the cost of compression).
+
+ Compression can be done in:
+ - a single step (described as Simple API)
+ - a single step, reusing a context (described as Explicit context)
+ - unbounded multiple steps (described as Streaming compression)
+
+  The compression ratio achievable on small data can be greatly improved using
+ a dictionary. Dictionary compression can be performed in:
+ - a single step (described as Simple dictionary API)
+ - a single step, reusing a dictionary (described as Bulk-processing
+ dictionary API)
+
+ Advanced experimental functions can be accessed using
+ `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+ Advanced experimental APIs should never be used with a dynamically-linked
+ library. They are not "stable"; their definitions or signatures may change in
+ the future. Only static linking is allowed.
+*******************************************************************************/
+
+/*------ Version ------*/
+#define ZSTD_VERSION_MAJOR 1
+#define ZSTD_VERSION_MINOR 3
+#define ZSTD_VERSION_RELEASE 6
+
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll version */
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+ZSTDLIB_API const char* ZSTD_versionString(void); /* v1.3.0+ */
+
+/***************************************
+* Default constant
+***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+# define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/***************************************
+* Simple API
+***************************************/
+/*! ZSTD_compress() :
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ *  @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress() :
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ *  `dstCapacity` is an upper bound of the original size to regenerate.
+ *  If the user cannot infer a maximum upper bound, it's better to use streaming mode to decompress the data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
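+/* Illustrative round-trip (hypothetical caller code ; error handling abridged) :
+ *
+ *   size_t const bound = ZSTD_compressBound(srcSize);
+ *   void*  const cBuf  = malloc(bound);
+ *   size_t const cSize = ZSTD_compress(cBuf, bound, src, srcSize, ZSTD_CLEVEL_DEFAULT);
+ *   if (ZSTD_isError(cSize)) { ... }               // see ZSTD_getErrorName()
+ *   size_t const dSize = ZSTD_decompress(dst, dstCapacity, cBuf, cSize);
+ */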
+
+/*! ZSTD_getFrameContentSize() : added in v1.3.0
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ *   note 2 : decompressed size is an optional field; it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ *            Optionally, the application can rely on some implicit limit,
+ *            as ZSTD_decompress() only needs an upper bound of the decompressed size.
+ *            (For example, data could be cut into blocks <= 16 KB by construction).
+ *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ *   note 4 : decompressed size can be very large (a 64-bit value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+
+/*! ZSTD_getDecompressedSize() :
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
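+/* Illustrative sizing pattern (hypothetical caller code) : use the frame header
+ * to allocate an exact destination buffer before calling ZSTD_decompress().
+ *
+ *   unsigned long long const rSize = ZSTD_getFrameContentSize(cBuf, cSize);
+ *   if (rSize == ZSTD_CONTENTSIZE_UNKNOWN
+ *    || rSize == ZSTD_CONTENTSIZE_ERROR) { ... }   // fall back to streaming mode
+ *   void* const dst = malloc((size_t)rSize);
+ */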
+
+
+/*====== Helper functions ======*/
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+
+
+/***************************************
+* Explicit context
+***************************************/
+/*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ *  This makes the workload friendlier to the system's memory allocator.
+ * Use one context per thread for parallel execution in multi-threaded environments. */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
+
+/*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
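+/* Illustrative context re-use (hypothetical caller code) : one allocation,
+ * many compressions.
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   for (i = 0; i < nbJobs; i++)
+ *       ZSTD_compressCCtx(cctx, dst[i], dstCap[i], src[i], srcSize[i], ZSTD_CLEVEL_DEFAULT);
+ *   ZSTD_freeCCtx(cctx);
+ */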
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ *  and re-use it for each successive decompression operation.
+ *  This makes the workload friendlier to the system's memory allocator.
+ * Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
+
+/*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()) */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+
+/**************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression using a predefined Dictionary (see dictBuilder/zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
+/**********************************
+ * Bulk processing dictionary API
+ *********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict
+ * Note : A ZSTD_CDict can be created with an empty dictionary, but it is inefficient for small data. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
+ * Note that compression level is decided during dictionary creation.
+ * Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)
+ * Note : ZSTD_compress_usingCDict() can be used with a ZSTD_CDict created from an empty dictionary.
+ * But it is inefficient for small data, and it is recommended to use ZSTD_compressCCtx(). */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
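+/* Illustrative bulk-dictionary usage (hypothetical caller code) : digest the
+ * dictionary once, then compress many small messages against it.
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, ZSTD_CLEVEL_DEFAULT);
+ *   size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
+ *   ZSTD_freeCDict(cdict);
+ */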
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
+
+
+/****************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /**< start of input buffer */
+ size_t size; /**< size of input buffer */
+ size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /**< start of output buffer */
+ size_t size; /**< size of output buffer */
+ size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+*  It is recommended to re-use a ZSTD_CStream in situations where many streaming operations will be performed consecutively,
+*  since it plays nicer with the system's memory, by re-using already-allocated buffers.
+* Use one separate ZSTD_CStream per thread for parallel execution.
+*
+*  Start a new compression operation by initializing the ZSTD_CStream context with ZSTD_initCStream().
+*  Use the variants ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for streaming with a dictionary (experimental section).
+*
+* Use ZSTD_compressStream() as many times as necessary to consume input stream.
+* The function will automatically update both `pos` fields within `input` and `output`.
+* Note that the function may not consume the entire input,
+* for example, because the output buffer is already full,
+* in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+*  If not, the caller must make some room to receive more compressed data,
+*  typically by emptying the output buffer, or allocating a new output buffer,
+*  and then present the remaining input data again.
+* @return : a size hint, preferred nb of bytes to use as input for next function call
+* or an error code, which can be tested using ZSTD_isError().
+* Note 1 : it's just a hint, to help latency a little, any other value will work fine.
+* Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
+*
+*  At any moment, it's possible to flush whatever data might remain stuck within the internal buffer,
+*  using ZSTD_flushStream(). `output->pos` will be updated.
+*  Note that, if `output->size` is too small, a single invocation of ZSTD_flushStream() might not be enough (return code > 0).
+*  In which case, make some room to receive more compressed data, and call ZSTD_flushStream() again.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+*  ZSTD_endStream() instructs the stream to finish a frame.
+*  It performs a flush and writes the frame epilogue.
+*  The epilogue is required for decoders to consider a frame completed.
+*  Its flush operation behaves the same, and follows the same rules, as ZSTD_flushStream().
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+/*===== Streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block in all circumstances. */
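+/* Illustrative streaming-compression loop (hypothetical caller code, abridged) :
+ *
+ *   ZSTD_CStream* const zcs = ZSTD_createCStream();
+ *   ZSTD_initCStream(zcs, ZSTD_CLEVEL_DEFAULT);
+ *   ZSTD_inBuffer  input  = { src, srcSize, 0 };
+ *   ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *   while (input.pos < input.size)
+ *       ZSTD_compressStream(zcs, &output, &input);   // check ZSTD_isError() on each call
+ *   while (ZSTD_endStream(zcs, &output) != 0)
+ *       ;   // 0 means fully flushed ; make room in output if it fills up
+ *   ZSTD_freeCStream(zcs);
+ */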
+
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation,
+* or ZSTD_initDStream_usingDict() if decompression requires a dictionary.
+* @return : recommended first input size
+*
+*  Use ZSTD_decompressStream() repeatedly to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+*  It's up to the caller to present the remaining data again.
+*  If `output.pos < output.size`, the decoder has flushed everything it could.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* an error code, which can be tested using ZSTD_isError(),
+*            any other value > 0, which means there is still some decoding to do to complete the current frame.
+*  The return value is a suggested next input size (a hint to improve latency) that will never ask for more than the current frame.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, continue to consider them separated. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+/*===== Streaming decompression functions =====*/
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);    /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
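+/* Illustrative streaming-decompression loop (hypothetical caller code, abridged) :
+ *
+ *   ZSTD_DStream* const zds = ZSTD_createDStream();
+ *   ZSTD_initDStream(zds);
+ *   ZSTD_inBuffer input = { cSrc, cSize, 0 };
+ *   while (input.pos < input.size) {
+ *       ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *       size_t const ret = ZSTD_decompressStream(zds, &output, &input);
+ *       if (ZSTD_isError(ret)) break;
+ *       // consume output.pos bytes from dst before the next iteration
+ *   }
+ *   ZSTD_freeDStream(zds);
+ */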
+
+#endif /* ZSTD_H_235446 */
+
+
+
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/****************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * They are provided for advanced scenarios.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* v0.8+ */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* v0.7+ */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX) /* define, for static allocation */
+
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
+#define ZSTD_WINDOWLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX_32 29
+#define ZSTD_CHAINLOG_MAX_64 30
+#define ZSTD_CHAINLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX 17
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_SEARCHLENGTH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MIN 3 /* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size to know frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN 6
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* for static allocation */
+static const size_t ZSTD_frameHeaderSize_prefix = ZSTD_FRAMEHEADERSIZE_PREFIX;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+static const size_t ZSTD_skippableHeaderSize = 8; /* magic number + skippable frame length */
+
+
+
+/* --- Advanced types --- */
+typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
+ ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy; /* from faster to stronger */
+
+typedef struct {
+ unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */
+ unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+ unsigned hashLog; /**< dispatch table : larger == faster, more memory */
+ unsigned searchLog; /**< nb of searches : larger == more compression, slower */
+ unsigned searchLength; /**< match length searched : larger == faster decompression, sometimes less compression */
+ unsigned targetLength; /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
+ ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+typedef struct {
+ unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+ unsigned checksumFlag; /**< 1: generate a 32-bits checksum at end of frame, for error detection */
+ unsigned noDictIDFlag; /**< 1: no dictID will be saved into frame header (if dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef enum {
+ ZSTD_dct_auto=0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+ ZSTD_dct_rawContent, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+ ZSTD_dct_fullDict /* refuses to load a dictionary if it does not respect Zstandard's specification */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
+ ZSTD_dlm_byRef, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+
+
+/***************************************
+* Frame size functions
+***************************************/
+
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD encoded frame or skippable frame
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findDecompressedSize() :
+ * `src` should point the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`)
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast as most of the data is skipped,
+ * however it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
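+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * one way the three possible outcomes of ZSTD_findDecompressedSize() might be
+ * dispatched. `src`/`srcSize` are assumed to describe a complete series of frames.
+ *
+ *     unsigned long long const rSize = ZSTD_findDecompressedSize(src, srcSize);
+ *     if (rSize == ZSTD_CONTENTSIZE_ERROR) {
+ *         // invalid input : reject it
+ *     } else if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+ *         // size not stored in headers : fall back to streaming decompression
+ *     } else {
+ *         // rSize is usable, but per note 4 above, validate it against
+ *         // application limits before allocating a destination buffer
+ *     }
+ */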
+
+/*! ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+
+/***************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_sizeof_*() :
+ * These functions give the current memory usage of selected object.
+ * Object memory usage can evolve when re-used. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
+ * ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
+ * Note : CCtx size estimation is only correct for single-threaded compression. */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ * ZSTD_DStream memory budget depends on window size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize(),
+ * or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame().
+ * Note : if streaming is initialized with function ZSTD_init?Stream_usingDict(),
+ * an internal ?Dict will be created, whose additional size is not estimated here.
+ * In this case, get the total size by adding ZSTD_estimate?DictSize */
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
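+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * budgeting a DStream from a frame header before committing memory.
+ * `frameStart` is assumed to hold at least ZSTD_FRAMEHEADERSIZE_MAX bytes of the frame.
+ *
+ *     size_t const budget = ZSTD_estimateDStreamSize_fromFrame(frameStart, ZSTD_FRAMEHEADERSIZE_MAX);
+ *     if (ZSTD_isError(budget)) { ... }   // header invalid, or window too large for this system
+ *     // `budget` can be compared against an application memory limit,
+ *     // or used as workspaceSize for ZSTD_initStaticDStream() (see below)
+ */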
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() assumes the dictionary source is relatively "small", and that its content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-bytes aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
+
+ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
+ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
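+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * decompression without any dynamic allocation, using a caller-owned workspace.
+ * The 200 KB budget and the gcc-style alignment attribute are assumptions;
+ * a real budget should come from ZSTD_estimateDCtxSize() or ZSTD_estimateDStreamSize*().
+ *
+ *     static char workspace[200 * 1024] __attribute__((aligned(8)));  // must be 8-bytes aligned
+ *     ZSTD_DCtx* const dctx = ZSTD_initStaticDCtx(workspace, sizeof(workspace));
+ *     if (dctx == NULL) { ... }   // workspace too small or misaligned
+ *     // use dctx normally; never call ZSTD_freeDCtx() on it,
+ *     // the workspace is released by whoever owns it
+ */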
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
+
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
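+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * routing every internal allocation through caller-supplied functions.
+ * `myAlloc`/`myFree` are hypothetical wrappers; `opaque` is handed back verbatim.
+ *
+ *     static void* myAlloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
+ *     static void  myFree (void* opaque, void* addr)  { (void)opaque; free(addr); }
+ *
+ *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);   // NULL on allocation failure
+ */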
+
+
+
+/***************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
+ * Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives CDict; it must remain read-accessible throughout the lifetime of CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getCParams() :
+* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+* `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+* same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+* All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+* Ensure param values remain within authorized range */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ * optimize params for a given `srcSize` and `dictSize`.
+ * both values are optional, select `0` if unknown. */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
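+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * deriving, hand-tuning and validating compression parameters.
+ * `level`, `srcSize` and `dictSize` are assumed known; pass 0 when they are not.
+ *
+ *     ZSTD_compressionParameters cPar = ZSTD_getCParams(level, srcSize, dictSize);
+ *     cPar = ZSTD_adjustCParams(cPar, srcSize, dictSize);   // shrink tables for small inputs
+ *     cPar.windowLog = 24;                                  // hypothetical manual override
+ *     if (ZSTD_isError(ZSTD_checkCParams(cPar))) { ... }    // override left authorized range
+ */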
+
+/*! ZSTD_compress_advanced() :
+* Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter */
+ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+* Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
+
+
+/*--- Advanced decompression functions ---*/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives DDict;
+ * it must remain read-accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be due to one of the following reasons :
+ * - The frame does not require a dictionary to be decoded (most common case).
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *   Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ * To identify the exact failure cause, use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
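+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * selecting a decompression dictionary from a frame's stored dictID.
+ * `lookupDDict` is a hypothetical application-side table lookup.
+ *
+ *     unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
+ *     const ZSTD_DDict* const ddict = (dictID != 0) ? lookupDDict(dictID) : NULL;
+ *     // dictID==0 is ambiguous (no dictionary needed, hidden dictionary, or truncated header);
+ *     // use ZSTD_getFrameHeader() when a precise diagnostic is required
+ */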
+
+
+/********************************************************************
+* Advanced streaming functions
+********************************************************************/
+
+/*===== Advanced Streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. */
+ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /**< note : cdict will just be referenced, and must outlive compression session */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. */
+
+/*! ZSTD_resetCStream() :
+ * start a new compression job, using same parameters from previous job.
+ * This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in place.
+ * Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+typedef struct {
+ unsigned long long ingested; /* nb input bytes read and buffered */
+ unsigned long long consumed; /* nb input bytes actually compressed */
+ unsigned long long produced; /* nb of compressed bytes generated and buffered */
+ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+ unsigned currentJobID; /* MT only : latest started job nb */
+ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression() :
+ * tells how much data has been ingested (read from input),
+ * consumed (input actually compressed) and produced (output) for current frame.
+ * Note : (ingested - consumed) is the amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
+ */
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+
+/*! ZSTD_toFlushNow() :
+ * Tell how many bytes are ready to be flushed immediately.
+ * Useful for multithreading scenarios (nbWorkers >= 1).
+ * Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ * and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ * if @return == 0, it means either :
+ * + there is no active job (could be checked with ZSTD_frameProgression()), or
+ * + oldest job is still actively compressing data,
+ * but everything it has produced has also been flushed so far,
+ * therefore flushing speed is currently limited by production speed of oldest job
+ * irrespective of the speed of concurrent newer jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+
+
+
+/*===== Advanced Streaming decompression functions =====*/
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
+ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue); /* obsolete : this API will be removed in a future version */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict is referenced, it must outlive decompression session */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompression parameters from previous init; saves dictionary loading */
+
+
+/*********************************************************************
+* Buffer-less and synchronous inner streaming functions
+*
+* This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/**
+ Buffer-less streaming compression (synchronous mode)
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
+ ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+ Start by initializing a context.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+ or ZSTD_compressBegin_advanced(), for finer parameter control.
+ It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+
+ Then, consume your input using ZSTD_compressContinue().
+ There are some important considerations to keep in mind when using this advanced function :
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+ Worst case evaluation is provided by ZSTD_compressBound().
+ ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+ - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+ It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+ - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
+ In which case, it will "discard" the relevant memory section from its history.
+
+ Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+ It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*===== Buffer-less streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
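+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * one frame compressed from two input chunks with the buffer-less API.
+ * `dst` is assumed large enough for the worst case (see ZSTD_compressBound()),
+ * and each return value should be tested with ZSTD_isError() before use.
+ *
+ *     size_t written = 0;
+ *     size_t r = ZSTD_compressBegin(cctx, 3);                 // level 3
+ *     r = ZSTD_compressContinue(cctx, dst + written, dstCapacity - written, chunk1, chunk1Size);
+ *     written += r;
+ *     r = ZSTD_compressEnd(cctx, dst + written, dstCapacity - written, chunk2, chunk2Size);
+ *     written += r;                                           // frame (incl. epilogue) is complete
+ */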
+
+
+/*-
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+ Data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+ or an error code, which can be tested using ZSTD_isError().
+
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+ ZSTD_decompressContinue() is very sensitive to contiguity :
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if required value is too large for current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+ whose maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At which point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are setup, start decompression, with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+ Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() in alternation.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ It can also be an error code, which can be tested with ZSTD_isError().
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+ Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
+ This information is not required to properly decode a frame.
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+ Skippable frames will be ignored (skipped) by decompressor.
+ The format of skippable frames is as follows :
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
+
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTD_frameHeader;
+/** ZSTD_getFrameHeader() :
+ * decode Frame Header, or requires larger `srcSize`.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, the return value is the wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/* misc */
+ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
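+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * the core decoding loop of the buffer-less API. `ip` is the input cursor,
+ * `op`/`oend` the output cursor and end (all assumed); the contiguity rules
+ * described above are assumed to be satisfied by the caller.
+ *
+ *     ZSTD_frameHeader zfh;
+ *     if (ZSTD_getFrameHeader(&zfh, ip, ipSize) != 0) { ... } // need more input, or error
+ *     ZSTD_decompressBegin(dctx);
+ *     while (1) {
+ *         size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *         if (toRead == 0) break;                             // frame fully decoded
+ *         size_t const rSize = ZSTD_decompressContinue(dctx, op, oend - op, ip, toRead);
+ *         if (ZSTD_isError(rSize)) { ... }                    // corruption, etc.
+ *         ip += toRead; op += rSize;                          // rSize may be 0 (metadata only)
+ *     }
+ */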
+
+
+
+/* ============================================ */
+/** New advanced API (experimental) */
+/* ============================================ */
+
+/* API design :
+ * In this advanced API, parameters are pushed one by one into an existing context,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are applied to next job, and any subsequent job.
+ * It's possible to reset parameters to "default" using ZSTD_CCtx_reset().
+ * Important : "sticky" parameters only work with `ZSTD_compress_generic()` !
+ * For any other entry point, "sticky" parameters are ignored !
+ *
+ * This API is intended to replace all other advanced / experimental API entry points.
+ */
+
+/* note on enum design :
+ * All enums will be pinned to explicit values before reaching "stable API" status */
+
+typedef enum {
+ /* Open question : should we have a format ZSTD_f_auto ?
+ * Today, it would mean exactly the same as ZSTD_f_zstd1.
+ * But, in the future, should several formats become supported,
+ * on the compression side, it would mean "default format".
+ * On the decompression side, it would mean "automatic format detection",
+ * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
+ * Since meaning is a little different, another option could be to define different enums for compression and decompression.
+ * This question could be kept for later, when there are actually multiple formats to support,
+ * but there is also the question of pinning enum values, and pinning value `0` is especially important */
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless, /* Variant of zstd frame format, without initial 4-bytes magic number.
+ * Useful to save 4 bytes per generated frame.
+ * Decoder cannot recognise this format automatically; it requires explicit instruction. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* compression format */
+ ZSTD_p_format = 10, /* See ZSTD_format_e enum definition.
+ * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */
+
+ /* compression parameters */
+ ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
+ * Default level is ZSTD_CLEVEL_DEFAULT==3.
+ * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+ * Note 1 : it's possible to pass a negative compression level by casting it to unsigned type.
+ * Note 2 : setting a level sets all default values of other compression parameters.
+ * Note 3 : setting compressionLevel automatically updates ZSTD_p_compressLiterals. */
+ ZSTD_p_windowLog, /* Maximum allowed back-reference distance, expressed as power of 2.
+ * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+ * Special: value 0 means "use default windowLog".
+ * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
+ * requires explicitly allowing such window size during decompression stage. */
+ ZSTD_p_hashLog, /* Size of the initial probe table, as a power of 2.
+ * Resulting table size is (1 << (hashLog+2)).
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+ * Larger tables improve compression ratio of strategies <= dFast,
+ * and improve speed of strategies > dFast.
+ * Special: value 0 means "use default hashLog". */
+ ZSTD_p_chainLog, /* Size of the multi-probe search table, as a power of 2.
+ * Resulting table size is (1 << (chainLog+2)).
+ * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+ * Larger tables result in better and slower compression.
+ * This parameter is useless when using "fast" strategy.
+ * Note it's still useful when using "dfast" strategy,
+ * in which case it defines a secondary probe table.
+ * Special: value 0 means "use default chainLog". */
+ ZSTD_p_searchLog, /* Number of search attempts, as a power of 2.
+ * More attempts result in better and slower compression.
+ * This parameter is useless when using "fast" and "dFast" strategies.
+ * Special: value 0 means "use default searchLog". */
+ ZSTD_p_minMatch, /* Minimum size of searched matches (note : repCode matches can be smaller).
+ * Larger values make faster compression and decompression, but decrease ratio.
+ * Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
+ * Note that currently, for all strategies < btopt, the effective minimum is 4,
+ * and for all strategies > fast, the effective maximum is 6.
+ * Special: value 0 means "use default minMatchLength". */
+ ZSTD_p_targetLength, /* Impact of this field depends on strategy.
+ * For strategies btopt & btultra:
+ * Length of Match considered "good enough" to stop search.
+ * Larger values make compression stronger, and slower.
+ * For strategy fast:
+ * Distance between match sampling.
+ * Larger values make compression faster, and weaker.
+ * Special: value 0 means "use default targetLength". */
+ ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition.
+ * Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
+
+ ZSTD_p_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+ * for large inputs, by finding large matches at long distance.
+ * It increases memory usage and window size.
+ * Note: enabling this parameter increases ZSTD_p_windowLog to 27 (i.e. a 128 MB window)
+ * except when expressly set to a different value. */
+ ZSTD_p_ldmHashLog, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio,
+ * but decrease compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * default: windowLog - 7.
+ * Special: value 0 means "automatically determine hashlog". */
+ ZSTD_p_ldmMinMatch, /* Minimum match size for long distance matcher.
+ * Values that are too large or too small usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+ * Special: value 0 means "use default value" (default: 64). */
+ ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values improve collision resolution but decrease compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+ * Special: value 0 means "use default value" (default: 3). */
+ ZSTD_p_ldmHashEveryLog, /* Frequency of inserting/looking up entries in the LDM hash table.
+ * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+ * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+ * Larger values improve compression speed.
+ * Deviating far from default value will likely result in a compression ratio decrease.
+ * Special: value 0 means "automatically determine hashEveryLog". */
+
+ /* frame parameters */
+ ZSTD_p_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+ * Content size must be known at the beginning of compression,
+ * it is provided using ZSTD_CCtx_setPledgedSrcSize() */
+ ZSTD_p_checksumFlag, /* A 32-bits checksum of content is written at end of frame (default:0) */
+ ZSTD_p_dictIDFlag, /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+ /* multi-threading parameters */
+ /* These parameters are only useful if multi-threading is enabled (ZSTD_MULTITHREAD).
+ * They return an error otherwise. */
+ ZSTD_p_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
+ * When nbWorkers >= 1, triggers asynchronous mode :
+ * ZSTD_compress_generic() consumes some input, flushes some output if possible, and immediately gives back control to caller,
+ * while compression work is performed in parallel, within worker threads.
+ * (note : a strong exception to this rule is when first invocation sets ZSTD_e_end : it becomes a blocking call).
+ * More workers improve speed, but also increase memory usage.
+ * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
+ ZSTD_p_jobSize, /* Size of a compression job. This value is enforced only in non-blocking mode.
+ * Each compression job is completed in parallel, so this value indirectly controls the nb of active threads.
+ * 0 means default, which is dynamically determined based on compression parameters.
+ * Job size must be a minimum of overlapSize, or 1 MB, whichever is larger.
+ * The minimum size is automatically and transparently enforced. */
+ ZSTD_p_overlapSizeLog, /* Size of previous input reloaded at the beginning of each job.
+ * 0 => no overlap, 6 (default) => use 1/8th of windowSize, >=9 => use full windowSize */
+
+ /* =================================================================== */
+ /* experimental parameters - no stability guaranteed */
+ /* =================================================================== */
+
+ ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+ ZSTD_p_forceAttachDict, /* ZSTD supports usage of a CDict in-place
+ * (avoiding having to copy the compression tables
+ * from the CDict into the working context). Using
+ * a CDict in this way saves an initial setup step,
+ * but comes at the cost of more work per byte of
+ * input. ZSTD has a simple internal heuristic that
+ * guesses which strategy will be faster. You can
+ * use this flag to override that guess.
+ *
+ * Note that the by-reference, in-place strategy is
+ * only used when reusing a compression context
+ * with compatible compression parameters. (If
+ * incompatible / uninitialized, the working
+ * context needs to be cleared anyways, which is
+ * about as expensive as overwriting it with the
+ * dictionary context, so there's no savings in
+ * using the CDict by-ref.)
+ *
+ * Values greater than 0 force attaching the dict.
+ * Values less than 0 force copying the dict.
+ * 0 selects the default heuristic-guided behavior.
+ */
+
+} ZSTD_cParameter;
+
+
+/*! ZSTD_CCtx_setParameter() :
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Setting a parameter is generally only possible during frame initialization (before starting compression).
+ * Exception : when using multi-threading mode (nbWorkers >= 1),
+ * following parameters can be updated _during_ compression (within same frame):
+ * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ * new parameters will be active on next job, or after a flush().
+ * Note : when `value` type is not unsigned (int, or enum), cast it to unsigned for proper type checking.
+ * @result : informational value (typically, value being set, correctly clamped),
+ * or an error code (which can be tested with ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
+
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned* value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ * Total input data size to be compressed as a single frame.
+ * This value will be checked at the end of compression, and will result in an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : 0 means zero, empty.
+ * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new compression job.
+ * Note 2 : If all data is provided and consumed in a single round,
+ * this value is overridden by srcSize instead. */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+/*! ZSTD_CCtx_loadDictionary() :
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary will be used for all future compression jobs.
+ * To return to "no-dictionary" situation, load a NULL dictionary
+ * Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
+ * For this reason, compression parameters cannot be changed anymore after loading a dictionary.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Note 3 :`dict` content will be copied internally.
+ * Use ZSTD_CCtx_loadDictionary_byReference() to reference dictionary content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+
+/*! ZSTD_CCtx_refCDict() :
+ * Reference a prepared dictionary, to be used for all next compression jobs.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ * The dictionary will remain valid for future compression jobs using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : adding a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Adding a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next compression job.
+ * Decompression will need same prefix to properly regenerate data.
+ * Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * Note that prefix is **only used once**. Tables are discarded at end of compression job (ZSTD_e_end).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression job.
+ * Its content must remain unmodified up to end of compression (ZSTD_e_end).
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_p_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ * Use ZSTD_CCtx_refPrefix_advanced() to alter dictContentType. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize,
+ ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_reset() :
+ * Return a CCtx to clean state.
+ * Useful after an error, or to interrupt an ongoing compression job and start a new one.
+ * Any internal data not yet flushed is cancelled.
+ * The parameters and dictionary are kept unchanged; to reset them, use ZSTD_CCtx_resetParameters().
+ */
+ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);
+
+/*! ZSTD_CCtx_resetParameters() :
+ * All parameters are back to default values (compression level is ZSTD_CLEVEL_DEFAULT).
+ * Dictionary (if any) is dropped.
+ * Resetting parameters is only possible during frame initialization (before starting compression).
+ * To reset the context use ZSTD_CCtx_reset().
+ * @return 0 or an error code (which can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_resetParameters(ZSTD_CCtx* cctx);
+
+
+
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal conditions */
+ ZSTD_e_flush, /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
+ ZSTD_e_end /* flush any remaining data and close current frame. Any additional data starts a new frame. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compress_generic() :
+ * Behaves about the same as ZSTD_compressStream(). To note :
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
+ * - Compression parameters cannot be changed once compression is started.
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - In single-thread mode (default), function is blocking : it completes its job before returning to caller.
+ * - In multi-thread mode, function is non-blocking : it just acquires a copy of input, distributes jobs to internal worker threads,
+ * and then immediately returns, just indicating that there is some data remaining to be flushed.
+ * The function nonetheless guarantees forward progress : it will return only after it has read or written at least one byte.
+ * - Exception : in multi-threading mode, if the first call requests a ZSTD_e_end directive, it is blocking : it will complete compression before giving back control to caller.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
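+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * single-threaded one-shot use : push sticky parameters, then drive
+ * ZSTD_compress_generic() with ZSTD_e_end until it reports a full flush.
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
+ *     ZSTD_inBuffer  in  = { src, srcSize, 0 };
+ *     ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *     size_t remaining;
+ *     do {
+ *         remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ *         if (ZSTD_isError(remaining)) { ... }   // handle error
+ *     } while (remaining != 0);                  // 0 => frame completed and fully flushed
+ */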
+
+
+/*! ZSTD_compress_generic_simpleArgs() :
+ * Same as ZSTD_compress_generic(),
+ * but using only integral types as arguments.
+ * Argument list is larger than ZSTD_{in,out}Buffer,
+ * but can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compress_generic_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp);
+
+
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent compression jobs.
+ * - ZSTD_compress_generic() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
+ * for static allocation for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
+
+
+/*! ZSTD_CCtxParams_reset() :
+ * Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+
+/*! ZSTD_CCtxParam_setParameter() :
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
+ * Note : when `value` is an enum, cast it to unsigned for proper type checking.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);
+
+/*! ZSTD_CCtxParam_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParam_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ * This can be done even after compression is started :
+ * if nbWorkers==0, this will have no impact until a new compression is started;
+ * if nbWorkers>=1, new parameters will be picked up at next job,
+ * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
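+/* Example (editor's sketch, not part of upstream zstd.h) :
+ * building a parameter set off to the side, then applying it to a context.
+ * Each call returns a size_t that should be checked with ZSTD_isError().
+ *
+ *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *     ZSTD_CCtxParams_init(params, 3);                               // start from level-3 defaults
+ *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);
+ *     ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
+ *     ZSTD_freeCCtxParams(params);
+ */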
+
+
+/* ==================================== */
+/*=== Advanced decompression API ===*/
+/* ==================================== */
+
+/* The following API works the same way as the advanced compression API :
+ * a context is created, parameters are pushed into it one by one,
+ * then the context can be used to decompress data using an interface similar to the streaming API.
+ */
+
+/*! ZSTD_DCtx_loadDictionary() :
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : `dict` content will be copied internally.
+ * Use ZSTD_DCtx_loadDictionary_byReference()
+ * to reference dictionary content instead.
+ * In which case, the dictionary buffer must outlive its users.
+ * Note 2 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select
+ * how dictionary content will be interpreted and loaded.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+
+/*! ZSTD_DCtx_refDDict() :
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special : adding a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
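+
+/* A sketch of the intended usage (illustrative only ; `dictBuffer`/`dictSize`
+ * designate a caller-provided dictionary) :
+ *
+ *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictSize);
+ *     size_t const err = ZSTD_DCtx_refDDict(dctx, ddict);
+ *     // ... decompress any number of frames using dctx ...
+ *     ZSTD_freeDDict(ddict);   // only once dctx no longer uses it (cf. Note 2)
+ */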
+
+
+/*! ZSTD_DCtx_refPrefix() :
+ *  Reference a prefix (single-usage dictionary) for the next decompression job.
+ * This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ * and must use the same prefix as the one used during compression.
+ * Prefix is **only used once**. Reference is discarded at end of frame.
+ * End of frame is reached when ZSTD_DCtx_decompress_generic() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ * Note 2 : Prefix buffer is referenced. It **must** outlive decompression job.
+ * Prefix buffer must remain unmodified up to the end of frame,
+ * reached when ZSTD_DCtx_decompress_generic() returns 0.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ *          Use ZSTD_DCtx_refPrefix_advanced() to alter dictContentType.
+ * Note 4 : Referencing a raw content prefix has almost no CPU nor memory cost.
+ *          A fullDict prefix is more costly though.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize,
+ ZSTD_dictContentType_e dictContentType);
+
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ *  Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
+ *  This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario).
+ *  This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode.
+ *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+
+
+/*! ZSTD_DCtx_setFormat() :
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr,
+ const void* src, size_t srcSize, ZSTD_format_e format);
+
+
+/*! ZSTD_decompress_generic() :
+ *  Behaves the same as ZSTD_decompressStream.
+ *  Decompression parameters cannot be changed once decompression is started.
+ * @return : 0 when a frame has just been fully decoded and flushed,
+ *           >0 : a hint, the number of expected input bytes for the next invocation,
+ *           or an error code, which can be tested using ZSTD_isError().
+ */
+ZSTDLIB_API size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input);
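+
+/* Minimal single-frame decompression loop built on this function
+ * (an illustrative sketch ; error handling abbreviated ;
+ *  `srcBuffer`/`srcSize` and `dstBuffer`/`dstCapacity` are caller-provided) :
+ *
+ *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *     ZSTD_inBuffer  input  = { srcBuffer, srcSize, 0 };
+ *     ZSTD_outBuffer output = { dstBuffer, dstCapacity, 0 };
+ *     while (input.pos < input.size) {
+ *         size_t const hint = ZSTD_decompress_generic(dctx, &output, &input);
+ *         if (ZSTD_isError(hint)) break;   // report via ZSTD_getErrorName(hint)
+ *         if (hint == 0) break;            // frame fully decoded and flushed
+ *     }
+ *     ZSTD_freeDCtx(dctx);
+ */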
+
+
+/*! ZSTD_decompress_generic_simpleArgs() :
+ * Same as ZSTD_decompress_generic(),
+ * but using only integral types as arguments.
+ * Argument list is larger than ZSTD_{in,out}Buffer,
+ * but can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompress_generic_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to clean state.
+ * If a decompression was ongoing, any internal data not yet flushed is cancelled.
+ * All parameters are back to default values, including sticky ones.
+ * Dictionary (if any) is dropped.
+ * Parameters can be modified again after a reset.
+ */
+ZSTDLIB_API void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);
+
+
+
+/* ============================ */
+/** Block level API */
+/* ============================ */
+
+/*!
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ The user must keep track of the information required to regenerate the data, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+ - The context must be initialized before starting
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
+ - When a block is not compressible enough, ZSTD_compressBlock() returns zero.
+ In that case, nothing is written into `dst`.
+ + User must test for such an outcome and deal directly with the uncompressed data
+ (a usage sketch follows the declarations below)
+ + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+ + In case of multiple successive blocks, should some of them be uncompressed,
+ decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
+*/
+
+/*===== Raw zstd block functions =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
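+
+/* Usage sketch for the rules above (illustrative only ; cctx/dctx are assumed
+ * already initialized with ZSTD_compressBegin() / ZSTD_decompressBegin()) :
+ *
+ *     // encoder side
+ *     size_t const cSize = ZSTD_compressBlock(cctx, cBuf, cBufCapacity, src, srcSize);
+ *     if (ZSTD_isError(cSize)) return;     // report and abort
+ *     int const isRaw = (cSize == 0);      // not compressible : transmit src raw
+ *
+ *     // decoder side, mirroring the encoder's isRaw signal
+ *     if (isRaw) {
+ *         memcpy(dst, src, srcSize);               // regenerate the raw block
+ *         ZSTD_insertBlock(dctx, dst, srcSize);    // keep decoder history consistent
+ *     } else {
+ *         ZSTD_decompressBlock(dctx, dst, dstCapacity, cBuf, cSize);
+ *     }
+ */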
+
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/grub-core/lib/zstd/zstd_common.c b/grub-core/lib/zstd/zstd_common.c
new file mode 100644
index 0000000..6f05d24
--- /dev/null
+++ b/grub-core/lib/zstd/zstd_common.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdlib.h> /* malloc, calloc, free */
+#include <string.h> /* memset */
+#include "error_private.h"
+#include "zstd_internal.h"
+
+
+/*-****************************************
+* Version
+******************************************/
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+/*! ZSTD_isError() :
+ * tells if a return value is an error code */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+ * provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getErrorCode() :
+ *  convert a `size_t` function result into a proper ZSTD_ErrorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+ * provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
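+
+/* Typical caller-side idiom built on the helpers above (illustrative sketch) :
+ *
+ *     size_t const r = ZSTD_decompress(dst, dstCapacity, src, srcSize);
+ *     if (ZSTD_isError(r))
+ *         printf("zstd error : %s\n", ZSTD_getErrorName(r));
+ */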
+
+
+
+/*=**************************************************************
+* Custom allocator
+****************************************************************/
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return malloc(size);
+}
+
+void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+        void* const ptr = customMem.customAlloc(customMem.opaque, size);
+        if (ptr != NULL) memset(ptr, 0, size);   /* guard against allocation failure */
+        return ptr;
+ }
+ return calloc(1, size);
+}
+
+void ZSTD_free(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ free(ptr);
+ }
+}
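+
+/* Wiring a custom allocator (illustrative sketch ; myAlloc/myFree are hypothetical
+ * caller-defined callbacks matching ZSTD_allocFunction / ZSTD_freeFunction) :
+ *
+ *     static void* myAlloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
+ *     static void  myFree (void* opaque, void* address) { (void)opaque; free(address); }
+ *
+ *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
+ *     ZSTD_DCtx* const dctx = ZSTD_createDCtx_advanced(cmem);   // allocates via myAlloc
+ */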
diff --git a/grub-core/lib/zstd/zstd_decompress.c b/grub-core/lib/zstd/zstd_decompress.c
new file mode 100644
index 0000000..e4b5670
--- /dev/null
+++ b/grub-core/lib/zstd/zstd_decompress.c
@@ -0,0 +1,3108 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTD_decompress() allocates its context,
+ * on stack (0), or into heap (1, default; requires malloc()).
+ * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+* LEGACY_SUPPORT :
+* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 0
+#endif
+
+/*!
+ * MAXWINDOWSIZE_DEFAULT :
+ * maximum window size accepted by DStream __by default__.
+ * Frames requiring more memory will be rejected.
+ * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
+#endif
+
+/*!
+ * NO_FORWARD_PROGRESS_MAX :
+ * maximum allowed nb of calls to ZSTD_decompressStream() and ZSTD_decompress_generic()
+ * without any forward progress
+ * (defined as: no byte read from input, and no byte flushed to output)
+ * before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+# define ZSTD_NO_FORWARD_PROGRESS_MAX 16
+#endif
+
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include <string.h> /* memcpy, memmove, memset */
+#include "compiler.h" /* prefetch */
+#include "cpu.h" /* bmi2 */
+#include "mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+# include "zstd_legacy.h"
+#endif
+
+static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
+
+
+/*-*************************************
+* Errors
+***************************************/
+#define ZSTD_isError ERR_isError /* for inlining */
+#define FSE_isError ERR_isError
+#define HUF_isError ERR_isError
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+ ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+ ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+ zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+
+typedef struct {
+ U32 fastMode;
+ U32 tableLog;
+} ZSTD_seqSymbol_header;
+
+typedef struct {
+ U16 nextState;
+ BYTE nbAdditionalBits;
+ BYTE nbBits;
+ U32 baseValue;
+} ZSTD_seqSymbol;
+
+#define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
+
+typedef struct {
+ ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
+ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
+ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_entropyDTables_t;
+
+struct ZSTD_DCtx_s
+{
+ const ZSTD_seqSymbol* LLTptr;
+ const ZSTD_seqSymbol* MLTptr;
+ const ZSTD_seqSymbol* OFTptr;
+ const HUF_DTable* HUFptr;
+ ZSTD_entropyDTables_t entropy;
+ U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */
+ const void* previousDstEnd; /* detect continuity */
+ const void* prefixStart; /* start of current segment */
+ const void* virtualStart; /* virtual start of previous segment if it was just before current one */
+ const void* dictEnd; /* end of previous segment */
+ size_t expected;
+ ZSTD_frameHeader fParams;
+ U64 decodedSize;
+ blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
+ ZSTD_dStage stage;
+ U32 litEntropy;
+ U32 fseEntropy;
+ XXH64_state_t xxhState;
+ size_t headerSize;
+ ZSTD_format_e format;
+ const BYTE* litPtr;
+ ZSTD_customMem customMem;
+ size_t litSize;
+ size_t rleSize;
+ size_t staticSize;
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+
+ /* dictionary */
+ ZSTD_DDict* ddictLocal;
+ const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+ U32 dictID;
+ int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+
+ /* streaming */
+ ZSTD_dStreamStage streamStage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ size_t maxWindowSize;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t lhSize;
+ void* legacyContext;
+ U32 previousLegacyVersion;
+ U32 legacyVersion;
+ U32 hostageByte;
+ int noForwardProgress;
+
+ /* workspace */
+ BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support sizeof NULL */
+ return sizeof(*dctx)
+ + ZSTD_sizeof_DDict(dctx->ddictLocal)
+ + dctx->inBuffSize + dctx->outBuffSize;
+}
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+{
+ size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
+ ZSTD_frameHeaderSize_prefix - ZSTD_FRAMEIDSIZE :
+ ZSTD_frameHeaderSize_prefix;
+ ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
+}
+
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+{
+ dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
+ dctx->staticSize = 0;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
+ dctx->dictEnd = NULL;
+ dctx->ddictIsCold = 0;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
+ dctx->streamStage = zdss_init;
+ dctx->legacyContext = NULL;
+ dctx->previousLegacyVersion = 0;
+ dctx->noForwardProgress = 0;
+ dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
+ return dctx;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
+ if (!dctx) return NULL;
+ dctx->customMem = customMem;
+ ZSTD_initDCtx_internal(dctx);
+ return dctx;
+ }
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ DEBUGLOG(3, "ZSTD_createDCtx");
+ return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support free on NULL */
+ if (dctx->staticSize) return ERROR(memory_allocation); /* not compatible with static DCtx */
+ { ZSTD_customMem const cMem = dctx->customMem;
+ ZSTD_freeDDict(dctx->ddictLocal);
+ dctx->ddictLocal = NULL;
+ ZSTD_free(dctx->inBuff, cMem);
+ dctx->inBuff = NULL;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (dctx->legacyContext)
+ ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
+#endif
+ ZSTD_free(dctx, cMem);
+ return 0;
+ }
+}
+
+/* no longer useful */
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+ size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+ memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+ * Frame header decoding
+ ***************************************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if (magic == ZSTD_MAGICNUMBER) return 1;
+ if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(buffer, size)) return 1;
+#endif
+ return 0;
+}
+
+/** ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+ if (srcSize < minInputSize) return ERROR(srcSize_wrong);
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+ U32 const dictID= fhd & 3;
+ U32 const singleSegment = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
+ }
+}
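+
+/* Worked examples of the computation above, assuming format ZSTD_f_zstd1
+ * (minInputSize == 5 : 4-byte magic number + 1-byte frame header descriptor) :
+ * - fhd == 0x00 : not single-segment, no dictID, fcsId==0
+ *                 => 5 + 1 + 0 + 0 + 0 == 6 (the extra byte is the window descriptor)
+ * - fhd == 0x20 : single-segment, no dictID, fcsId==0
+ *                 => 5 + 0 + 0 + 0 + 1 == 6 (the extra byte is a 1-byte content size) */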
+
+/** ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/** ZSTD_getFrameHeader_advanced() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+
+    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since these are 2 different signals */
+ if (srcSize < minInputSize) return minInputSize;
+ if (src==NULL) return ERROR(GENERIC); /* invalid parameter */
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame */
+ if (srcSize < ZSTD_skippableHeaderSize)
+ return ZSTD_skippableHeaderSize; /* magic number + frame length */
+ memset(zfhPtr, 0, sizeof(*zfhPtr));
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
+ zfhPtr->frameType = ZSTD_skippableFrame;
+ return 0;
+ }
+ return ERROR(prefix_unknown);
+ }
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+ if (srcSize < fhsize) return fhsize;
+ zfhPtr->headerSize = (U32)fhsize;
+ }
+
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+ U32 const singleSegment = (fhdByte>>5)&1;
+ U32 const fcsID = fhdByte>>6;
+ U64 windowSize = 0;
+ U32 dictID = 0;
+ U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ if ((fhdByte & 0x08) != 0)
+ return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
+
+ if (!singleSegment) {
+ BYTE const wlByte = ip[pos++];
+ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ if (windowLog > ZSTD_WINDOWLOG_MAX)
+ return ERROR(frameParameter_windowTooLarge);
+ windowSize = (1ULL << windowLog);
+ windowSize += (windowSize >> 3) * (wlByte&7);
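+            /* e.g. (with ZSTD_WINDOWLOG_ABSOLUTEMIN == 10) wlByte == 0x01 :
+             * windowLog = 10, mantissa = 1, so windowSize = 1024 + (1024>>3)*1 = 1152 */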
+ }
+ switch(dictIDSizeCode)
+ {
+ default: assert(0); /* impossible */
+ case 0 : break;
+ case 1 : dictID = ip[pos]; pos++; break;
+ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+ }
+ switch(fcsID)
+ {
+ default: assert(0); /* impossible */
+ case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+ case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+ case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+ }
+ if (singleSegment) windowSize = frameContentSize;
+
+ zfhPtr->frameType = ZSTD_frame;
+ zfhPtr->frameContentSize = frameContentSize;
+ zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ zfhPtr->dictID = dictID;
+ zfhPtr->checksumFlag = checksumFlag;
+ }
+ return 0;
+}
+
+/** ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+ return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/** ZSTD_getFrameContentSize() :
+ * compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize)) {
+ unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
+ return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
+ }
+#endif
+ { ZSTD_frameHeader zfh;
+ if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+ return ZSTD_CONTENTSIZE_ERROR;
+ if (zfh.frameType == ZSTD_skippableFrame) {
+ return 0;
+ } else {
+ return zfh.frameContentSize;
+ } }
+}
+
+/** ZSTD_findDecompressedSize() :
+ * compatible with legacy mode
+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ * skippable frames
+ * @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long totalDstSize = 0;
+
+ while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+ U32 const magicNumber = MEM_readLE32(src);
+
+ if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t skippableSize;
+ if (srcSize < ZSTD_skippableHeaderSize)
+ return ERROR(srcSize_wrong);
+ skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_FRAMEIDSIZE)
+ + ZSTD_skippableHeaderSize;
+ if (srcSize < skippableSize) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ }
+
+ { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+ /* check for overflow */
+ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+ totalDstSize += ret;
+ }
+ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+ if (ZSTD_isError(frameSrcSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + frameSrcSize;
+ srcSize -= frameSrcSize;
+ }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+ return totalDstSize;
+}
+
+/** ZSTD_getDecompressedSize() :
+* compatible with legacy mode
+* @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
+ - frame content is empty
+ - decompressed size field is not present in frame header
+ - frame header unknown / not supported
+ - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+}
+
+
+/** ZSTD_decodeFrameHeader() :
+* `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+ size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
+ if (ZSTD_isError(result)) return result; /* invalid header */
+ if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
+ if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
+ return ERROR(dictionary_wrong);
+ if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+ return 0;
+}
+
+
+/*-*************************************************************
+ * Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+*  Provides the size of the compressed block, read from block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr)
+{
+ if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ { U32 const cBlockHeader = MEM_readLE24(src);
+ U32 const cSize = cBlockHeader >> 3;
+ bpPtr->lastBlock = cBlockHeader & 1;
+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+ bpPtr->origSize = cSize; /* only useful for RLE */
+ if (bpPtr->blockType == bt_rle) return 1;
+ if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
+ return cSize;
+ }
+}
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ if (dst==NULL) return ERROR(dstSize_tooSmall);
+ if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ size_t regenSize)
+{
+ if (srcSize != 1) return ERROR(srcSize_wrong);
+ if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ memset(dst, *(const BYTE*)src, regenSize);
+ return regenSize;
+}
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize);
+/*! ZSTD_decodeLiteralsBlock() :
+ * @return : nb of bytes read from src (< srcSize )
+ * note : symbol not declared but exposed for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+ switch(litEncType)
+ {
+ case set_repeat:
+ if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
+ /* fall-through */
+
+ case set_compressed:
+ if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+ { size_t lhSize, litSize, litCSize;
+ U32 singleStream=0;
+ U32 const lhlCode = (istart[0] >> 2) & 3;
+ U32 const lhc = MEM_readLE32(istart);
+ switch(lhlCode)
+ {
+                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ singleStream = !lhlCode;
+ lhSize = 3;
+ litSize = (lhc >> 4) & 0x3FF;
+ litCSize = (lhc >> 14) & 0x3FF;
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize = 4;
+ litSize = (lhc >> 4) & 0x3FFF;
+ litCSize = lhc >> 18;
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize = 5;
+ litSize = (lhc >> 4) & 0x3FFFF;
+ litCSize = (lhc >> 22) + (istart[4] << 10);
+ break;
+ }
+ if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+ if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+ /* prefetch huffman table if cold */
+ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+ PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+ }
+
+ if (HUF_isError((litEncType==set_repeat) ?
+ ( singleStream ?
+ HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
+ HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
+ ( singleStream ?
+ HUF_decompress1X1_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->workspace, sizeof(dctx->workspace), dctx->bmi2) :
+ HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->workspace, sizeof(dctx->workspace), dctx->bmi2))))
+ return ERROR(corruption_detected);
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ dctx->litEntropy = 1;
+ if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+
+ case set_basic:
+ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ switch(lhlCode)
+ {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ break;
+ }
+
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+
+ case set_rle:
+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+ switch(lhlCode)
+ {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+ break;
+ }
+ if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ return ERROR(corruption_detected); /* impossible */
+ }
+ }
+}
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables, using the default distributions as defined in the specification :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with the following method :
+ * - start from the default distributions, present in /lib/common/zstd_internal.h
+ * - generate the tables normally, using ZSTD_buildFSETable()
+ * - print out the content of the tables
+ * - prettify the output, report it below, and test with the fuzzer to ensure correctness */
+
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 4, 0}, { 16, 0, 4, 0},
+ { 32, 0, 5, 1}, { 0, 0, 5, 3},
+ { 0, 0, 5, 4}, { 0, 0, 5, 6},
+ { 0, 0, 5, 7}, { 0, 0, 5, 9},
+ { 0, 0, 5, 10}, { 0, 0, 5, 12},
+ { 0, 0, 6, 14}, { 0, 1, 5, 16},
+ { 0, 1, 5, 20}, { 0, 1, 5, 22},
+ { 0, 2, 5, 28}, { 0, 3, 5, 32},
+ { 0, 4, 5, 48}, { 32, 6, 5, 64},
+ { 0, 7, 5, 128}, { 0, 8, 6, 256},
+ { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
+ { 32, 0, 4, 0}, { 0, 0, 4, 1},
+ { 0, 0, 5, 2}, { 32, 0, 5, 4},
+ { 0, 0, 5, 5}, { 32, 0, 5, 7},
+ { 0, 0, 5, 8}, { 32, 0, 5, 10},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 32, 1, 5, 16}, { 0, 1, 5, 18},
+ { 32, 1, 5, 22}, { 0, 2, 5, 24},
+ { 32, 3, 5, 32}, { 0, 3, 5, 40},
+ { 0, 6, 4, 64}, { 16, 6, 4, 64},
+ { 32, 7, 5, 128}, { 0, 9, 6, 512},
+ { 0, 11, 6, 2048}, { 48, 0, 4, 0},
+ { 16, 0, 4, 1}, { 32, 0, 5, 2},
+ { 32, 0, 5, 3}, { 32, 0, 5, 5},
+ { 32, 0, 5, 6}, { 32, 0, 5, 8},
+ { 32, 0, 5, 9}, { 32, 0, 5, 11},
+ { 32, 0, 5, 12}, { 0, 0, 6, 15},
+ { 32, 1, 5, 18}, { 32, 1, 5, 20},
+ { 32, 2, 5, 24}, { 32, 2, 5, 28},
+ { 32, 3, 5, 40}, { 32, 4, 5, 48},
+ { 0, 16, 6,65536}, { 0, 15, 6,32768},
+ { 0, 14, 6,16384}, { 0, 13, 6, 8192},
+}; /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 5, 0}, { 0, 6, 4, 61},
+ { 0, 9, 5, 509}, { 0, 15, 5,32765},
+ { 0, 21, 5,2097149}, { 0, 3, 5, 5},
+ { 0, 7, 4, 125}, { 0, 12, 5, 4093},
+ { 0, 18, 5,262141}, { 0, 23, 5,8388605},
+ { 0, 5, 5, 29}, { 0, 8, 4, 253},
+ { 0, 14, 5,16381}, { 0, 20, 5,1048573},
+ { 0, 2, 5, 1}, { 16, 7, 4, 125},
+ { 0, 11, 5, 2045}, { 0, 17, 5,131069},
+ { 0, 22, 5,4194301}, { 0, 4, 5, 13},
+ { 16, 8, 4, 253}, { 0, 13, 5, 8189},
+ { 0, 19, 5,524285}, { 0, 1, 5, 1},
+ { 16, 6, 4, 61}, { 0, 10, 5, 1021},
+ { 0, 16, 5,65533}, { 0, 28, 5,268435453},
+ { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
+ { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
+}; /* OF_defaultDTable */
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 6, 3}, { 0, 0, 4, 4},
+ { 32, 0, 5, 5}, { 0, 0, 5, 6},
+ { 0, 0, 5, 8}, { 0, 0, 5, 9},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 0, 0, 6, 16}, { 0, 0, 6, 19},
+ { 0, 0, 6, 22}, { 0, 0, 6, 25},
+ { 0, 0, 6, 28}, { 0, 0, 6, 31},
+ { 0, 0, 6, 34}, { 0, 1, 6, 37},
+ { 0, 1, 6, 41}, { 0, 2, 6, 47},
+ { 0, 3, 6, 59}, { 0, 4, 6, 83},
+ { 0, 7, 6, 131}, { 0, 9, 6, 515},
+ { 16, 0, 4, 4}, { 0, 0, 4, 5},
+ { 32, 0, 5, 6}, { 0, 0, 5, 7},
+ { 32, 0, 5, 9}, { 0, 0, 5, 10},
+ { 0, 0, 6, 12}, { 0, 0, 6, 15},
+ { 0, 0, 6, 18}, { 0, 0, 6, 21},
+ { 0, 0, 6, 24}, { 0, 0, 6, 27},
+ { 0, 0, 6, 30}, { 0, 0, 6, 33},
+ { 0, 1, 6, 35}, { 0, 1, 6, 39},
+ { 0, 2, 6, 43}, { 0, 3, 6, 51},
+ { 0, 4, 6, 67}, { 0, 5, 6, 99},
+ { 0, 8, 6, 259}, { 32, 0, 4, 4},
+ { 48, 0, 4, 4}, { 16, 0, 4, 5},
+ { 32, 0, 5, 7}, { 32, 0, 5, 8},
+ { 32, 0, 5, 10}, { 32, 0, 5, 11},
+ { 0, 0, 6, 14}, { 0, 0, 6, 17},
+ { 0, 0, 6, 20}, { 0, 0, 6, 23},
+ { 0, 0, 6, 26}, { 0, 0, 6, 29},
+ { 0, 0, 6, 32}, { 0, 16, 6,65539},
+ { 0, 15, 6,32771}, { 0, 14, 6,16387},
+ { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
+ { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
+}; /* ML_defaultDTable */
+
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
+{
+ void* ptr = dt;
+ ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+ ZSTD_seqSymbol* const cell = dt + 1;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->nbBits = 0;
+ cell->nextState = 0;
+ assert(nbAddBits < 255);
+ cell->nbAdditionalBits = (BYTE)nbAddBits;
+ cell->baseValue = baseValue;
+}
+
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off) */
+static void
+ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog)
+{
+ ZSTD_seqSymbol* const tableDecode = dt+1;
+ U16 symbolNext[MaxSeq+1];
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ assert(maxSymbolValue <= MaxSeq);
+ assert(tableLog <= MaxFSELog);
+
+ /* Init, lay down lowprob symbols */
+ { ZSTD_seqSymbol_header DTableH;
+ DTableH.tableLog = tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].baseValue = s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ { U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].baseValue = s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ U32 const symbol = tableDecode[u].baseValue;
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ assert(nbAdditionalBits[symbol] < 255);
+ tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
+ tableDecode[u].baseValue = baseValue[symbol];
+ } }
+}
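+
+/* Illustration of the "Spread symbols" step above, on a hypothetical tiny table :
+ * tableLog==2 gives tableSize==4 and step = FSE_TABLESTEP(4) = (4>>1)+(4>>3)+3 = 5,
+ * so positions are visited in the order 0,1,2,3 before wrapping back to 0.
+ * With normalizedCounter = {2,1,1} : symbol 0 fills cells 0 and 1, symbol 1 fills
+ * cell 2, symbol 2 fills cell 3, and `position` returns to 0 as asserted. */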
+
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ * or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+ symbolEncodingType_e type, U32 max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+ int ddictIsCold, int nbSeq)
+{
+ switch(type)
+ {
+ case set_rle :
+ if (!srcSize) return ERROR(srcSize_wrong);
+ if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+ { U32 const symbol = *(const BYTE*)src;
+ U32 const baseline = baseValue[symbol];
+ U32 const nbBits = nbAdditionalBits[symbol];
+ ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+ }
+ *DTablePtr = DTableSpace;
+ return 1;
+ case set_basic :
+ *DTablePtr = defaultTable;
+ return 0;
+ case set_repeat:
+ if (!flagRepeatTable) return ERROR(corruption_detected);
+ /* prefetch FSE table if used */
+ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+ const void* const pStart = *DTablePtr;
+ size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+ PREFETCH_AREA(pStart, pSize);
+ }
+ return 0;
+ case set_compressed :
+ { U32 tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+ if (FSE_isError(headerSize)) return ERROR(corruption_detected);
+ if (tableLog > maxLog) return ERROR(corruption_detected);
+ ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
+ *DTablePtr = DTableSpace;
+ return headerSize;
+ }
+ default : /* impossible */
+ assert(0);
+ return ERROR(GENERIC);
+ }
+}
+
+static const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40,
+ 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+static const U32 OF_base[MaxOff+1] = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static const U32 OF_bits[MaxOff+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31 };
+
+static const U32 ML_base[MaxML+1] = {
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59,
+ 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize);
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+ int nbSeq;
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+ /* check */
+ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ nbSeq = *ip++;
+ if (!nbSeq) { *nbSeqPtr=0; return 1; }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ if (ip+2 > iend) return ERROR(srcSize_wrong);
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+ } else {
+ if (ip >= iend) return ERROR(srcSize_wrong);
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+
+ /* FSE table descriptors */
+ if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ ip++;
+
+ /* Build DTables */
+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+ LLtype, MaxLL, LLFSELog,
+ ip, iend-ip,
+ LL_base, LL_bits,
+ LL_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq);
+ if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
+ ip += llhSize;
+ }
+
+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+ OFtype, MaxOff, OffFSELog,
+ ip, iend-ip,
+ OF_base, OF_bits,
+ OF_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq);
+ if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
+ ip += ofhSize;
+ }
+
+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+ MLtype, MaxML, MLFSELog,
+ ip, iend-ip,
+ ML_base, ML_bits,
+ ML_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq);
+ if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
+ ip += mlhSize;
+ }
+ }
+
+ /* prefetch dictionary content */
+ if (dctx->ddictIsCold) {
+ size_t const dictSize = (const char*)dctx->prefixStart - (const char*)dctx->virtualStart;
+ size_t const psmin = MIN(dictSize, (size_t)(64*nbSeq) /* heuristic */ );
+ size_t const pSize = MIN(psmin, 128 KB /* protection */ );
+ const void* const pStart = (const char*)dctx->dictEnd - pSize;
+ PREFETCH_AREA(pStart, pSize);
+ dctx->ddictIsCold = 0;
+ }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+ const BYTE* match;
+} seq_t;
+
+typedef struct {
+ size_t state;
+ const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ ZSTD_fseState stateLL;
+ ZSTD_fseState stateOffb;
+ ZSTD_fseState stateML;
+ size_t prevOffset[ZSTD_REP_NUM];
+ const BYTE* prefixStart;
+ const BYTE* dictEnd;
+ size_t pos;
+} seqState_t;
+
+
+FORCE_NOINLINE
+size_t ZSTD_execSequenceLast7(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+    if (oLitEnd <= oend_w) return ERROR(GENERIC);   /* Precondition : this function is only called when oLitEnd > oend_w */
+
+ /* copy literals */
+ if (op < oend_w) {
+ ZSTD_wildcopy(op, *litPtr, oend_w - op);
+ *litPtr += oend_w - op;
+ op = oend_w;
+ }
+ while (op < oLitEnd) *op++ = *(*litPtr)++;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - base)) {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+ match = dictEnd - (base-match);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = base;
+ } }
+ while (op < oMatchEnd) *op++ = *match++;
+ return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ /* check */
+ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+ /* copy Literals */
+ ZSTD_copy8(op, *litPtr);
+ if (sequence.litLength > 8)
+ ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ if (sequence.offset > (size_t)(oLitEnd - virtualStart))
+ return ERROR(corruption_detected);
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ if (op > oend_w || sequence.matchLength < MINMATCH) {
+ U32 i;
+ for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+ return sequenceLength;
+ }
+ } }
+ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_w) {
+ ZSTD_wildcopy(op, match, oend_w - op);
+ match += oend_w - op;
+ op = oend_w;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ } else {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequenceLong(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = sequence.match;
+
+ /* check */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+ if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
+
+ /* copy Literals */
+ ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
+ if (sequence.litLength > 8)
+ ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
+ if (match + sequence.matchLength <= dictEnd) {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ if (op > oend_w || sequence.matchLength < MINMATCH) {
+ U32 i;
+ for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+ return sequenceLength;
+ }
+ } }
+ assert(op <= oend_w);
+ assert(sequence.matchLength >= MINMATCH);
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= sub2;
+ } else {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-(16-MINMATCH)) {
+ if (op < oend_w) {
+ ZSTD_wildcopy(op, match, oend_w - op);
+ match += oend_w - op;
+ op = oend_w;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ } else {
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ return sequenceLength;
+}
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+ const void* ptr = dt;
+ const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+ (U32)DStatePtr->state, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
+{
+ ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
+ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
+ ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
+ : 0)
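+
+/* With ZSTD_WINDOWLOG_MAX_32 == 30 and STREAM_ACCUMULATOR_MIN_32 == 25,
+ * this evaluates to 5, consistent with the static assert in ZSTD_decodeSequence(). */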
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+{
+ seq_t seq = {0};
+ U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+ U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+ U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+ U32 const totalBits = llBits+mlBits+ofBits;
+ U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+ U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+ U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+ /* sequence */
+ { size_t offset;
+ if (!ofBits)
+ offset = 0;
+ else {
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+ U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ }
+
+ if (ofBits <= 1) {
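+            /* ofBits <= 1 : the decoded value designates a repeat offset.
+             * llBase==0 can only come from LL code 0 (i.e. litLength==0), and per
+             * the format spec the repeat-offset indices shift by one in that case ;
+             * `offset += (llBase==0)` implements that shift. */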
+ offset += (llBase==0);
+ if (offset) {
+ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } else { /* offset == 0 */
+ offset = seqState->prevOffset[0];
+ }
+ } else {
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ seq.offset = offset;
+ }
+
+ seq.matchLength = mlBase
+ + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+    /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ seq.litLength = llBase
+ + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+ /* ANS state update */
+ ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
+ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
+
+ return seq;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_body");
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
+ nbSeq--;
+ { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ } }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+ if (nbSeq) return ERROR(corruption_detected);
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
+{
+ seq_t seq;
+ U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+ U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+ U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+ U32 const totalBits = llBits+mlBits+ofBits;
+ U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+ U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+ U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+ /* sequence */
+ { size_t offset;
+ if (!ofBits)
+ offset = 0;
+ else {
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets) {
+ U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ }
+
+ if (ofBits <= 1) {
+ offset += (llBase==0);
+ if (offset) {
+ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } else {
+ offset = seqState->prevOffset[0];
+ }
+ } else {
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ seq.offset = offset;
+ }
+
+ seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+    /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ { size_t const pos = seqState->pos + seq.litLength;
+ const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
+        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is too large, which can only happen when the input is corrupted.
+                                                    * No consequence though : no memory access will occur; an overly large offset will be detected in ZSTD_execSequenceLong() */
+ seqState->pos = pos + seq.matchLength;
+ }
+
+ /* ANS state update */
+ ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
+ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
+
+ return seq;
+}
+
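+/* ZSTD_decompressSequencesLong_body() :
+ * decodes sequences STORED_SEQS steps ahead of their execution and issues
+ * PREFETCH() on each upcoming match address, so the cache-miss latency of
+ * large-offset matches overlaps with the copies of earlier sequences. */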
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+ /* Regen sequences */
+ if (nbSeq) {
+#define STORED_SEQS 4
+#define STOSEQ_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS 4
+ seq_t sequences[STORED_SEQS];
+ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+ seqState_t seqState;
+ int seqNb;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ seqState.prefixStart = prefixStart;
+ seqState.pos = (size_t)(op-prefixStart);
+ seqState.dictEnd = dictEnd;
+ CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ /* prepare in advance */
+ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+ sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+ }
+ if (seqNb<seqAdvance) return ERROR(corruption_detected);
+
+ /* decode and decompress */
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
+ seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ PREFETCH(sequence.match); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ sequences[seqNb&STOSEQ_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ if (seqNb<nbSeq) return ERROR(corruption_detected);
+
+ /* finish queue */
+ seqNb -= seqAdvance;
+ for ( ; seqNb<nbSeq ; seqNb++) {
+ size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+#undef STORED_SEQS
+#undef STOSEQ_MASK
+#undef ADVANCED_SEQS
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+
+
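+/* The force-inlined _body() routines above are wrapped twice : once compiled
+ * normally (_default), and, when DYNAMIC_BMI2 is enabled, once with
+ * TARGET_ATTRIBUTE("bmi2") (_bmi2); dctx->bmi2 selects between them at run time. */
+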
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+#endif
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+ ZSTD_DCtx *dctx, void *dst, size_t maxDstSize,
+ const void *seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset);
+
+static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+ if (dctx->bmi2) {
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+#endif
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+ if (dctx->bmi2) {
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+#endif
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ * compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+ const void* ptr = offTable;
+ U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+ const ZSTD_seqSymbol* table = offTable + 1;
+ U32 const max = 1 << tableLog;
+ U32 u, total = 0;
+ DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+ assert(max <= (1 << OffFSELog)); /* max not too large */
+ for (u=0; u<max; u++) {
+ if (table[u].nbAdditionalBits > 22) total += 1;
+ }
+
+ assert(tableLog <= OffFSELog);
+ total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+
+ return total;
+}
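+
+/* Editor's note : nbAdditionalBits > 22 corresponds to offsets beyond 1<<23
+ * (~8 MiB). The thresholds used by ZSTD_decompressBlock_internal() below
+ * (7/256 on 64-bit, 20/256 on 32-bit) decide when the prefetching long-offset
+ * decoder pays off. */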
+
+
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+ * We don't expect that to be the case in 64-bit mode.
+ * In block mode, window size is not known, so we have to be conservative.
+ * (note: but it could be evaluated from current-lowLimit)
+ */
+ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+ if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
+
+ /* Decode literals section */
+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+
+ /* Build Decoding Tables */
+ { int nbSeq;
+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ srcSize -= seqHSize;
+
+ if ( (!frame || dctx->fParams.windowSize > (1<<24))
+ && (nbSeq>0) ) { /* could probably use a larger nbSeq limit */
+ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+ if (shareLongOffsets >= minShare)
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+ }
+
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+ }
+}
+
+
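+/* ZSTD_checkContinuity() :
+ * when the new destination is not contiguous with the previous output, the
+ * previous output segment becomes the dictionary segment, and virtualStart is
+ * shifted so that match offsets keep their meaning across the discontinuity. */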
+static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+ if (dst != dctx->previousDstEnd) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t dSize;
+ ZSTD_checkContinuity(dctx, dst);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
+ dctx->previousDstEnd = (char*)dst + dSize;
+ return dSize;
+}
+
+
+/** ZSTD_insertBlock() :
+ insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+ ZSTD_checkContinuity(dctx, blockStart);
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
+ return blockSize;
+}
+
+
+static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE value, size_t length)
+{
+ if (length > dstCapacity) return ERROR(dstSize_tooSmall);
+ memset(dst, value, length);
+ return length;
+}
+
+/** ZSTD_findFrameCompressedSize() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ * `srcSize` must be at least as large as the frame it contains
+ * @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize))
+ return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+#endif
+ if ( (srcSize >= ZSTD_skippableHeaderSize)
+ && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
+ return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE);
+ } else {
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const ipstart = ip;
+ size_t remainingSize = srcSize;
+ ZSTD_frameHeader zfh;
+
+ /* Extract Frame Header */
+ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(ret)) return ret;
+ if (ret > 0) return ERROR(srcSize_wrong);
+ }
+
+ ip += zfh.headerSize;
+ remainingSize -= zfh.headerSize;
+
+ /* Loop on each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+ return ERROR(srcSize_wrong);
+
+ ip += ZSTD_blockHeaderSize + cBlockSize;
+ remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+
+ if (blockProperties.lastBlock) break;
+ }
+
+ if (zfh.checksumFlag) { /* Final frame content checksum */
+ if (remainingSize < 4) return ERROR(srcSize_wrong);
+ ip += 4;
+ }
+
+ return ip - ipstart;
+ }
+}
+
+/*! ZSTD_decompressFrame() :
+* @dctx must be properly initialized */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void** srcPtr, size_t *srcSizePtr)
+{
+ const BYTE* ip = (const BYTE*)(*srcPtr);
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t remainingSize = *srcSizePtr;
+
+ /* check */
+ if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
+ return ERROR(srcSize_wrong);
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
+ return ERROR(srcSize_wrong);
+ CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t decodedSize;
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
+ break;
+ case bt_reserved :
+ default:
+ return ERROR(corruption_detected);
+ }
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ if (dctx->fParams.checksumFlag)
+ XXH64_update(&dctx->xxhState, op, decodedSize);
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ if (blockProperties.lastBlock) break;
+ }
+
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
+ return ERROR(corruption_detected);
+ } }
+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+ U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
+ U32 checkRead;
+ if (remainingSize<4) return ERROR(checksum_wrong);
+ checkRead = MEM_readLE32(ip);
+ if (checkRead != checkCalc) return ERROR(checksum_wrong);
+ ip += 4;
+ remainingSize -= 4;
+ }
+
+ /* Allow caller to get size read */
+ *srcPtr = ip;
+ *srcSizePtr = remainingSize;
+ return op-ostart;
+}
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_DDict* ddict)
+{
+ void* const dststart = dst;
+ int moreThan1Frame = 0;
+
+ DEBUGLOG(5, "ZSTD_decompressMultiFrame");
+ assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
+
+ if (ddict) {
+ dict = ZSTD_DDictDictContent(ddict);
+ dictSize = ZSTD_DDictDictSize(ddict);
+ }
+
+ while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize)) {
+ size_t decodedSize;
+ size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+ if (ZSTD_isError(frameSize)) return frameSize;
+ /* legacy support is not compatible with static dctx */
+ if (dctx->staticSize) return ERROR(memory_allocation);
+
+ decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
+
+ dst = (BYTE*)dst + decodedSize;
+ dstCapacity -= decodedSize;
+
+ src = (const BYTE*)src + frameSize;
+ srcSize -= frameSize;
+
+ continue;
+ }
+#endif
+
+ { U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+ (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
+ if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t skippableSize;
+ if (srcSize < ZSTD_skippableHeaderSize)
+ return ERROR(srcSize_wrong);
+ skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE)
+ + ZSTD_skippableHeaderSize;
+ if (srcSize < skippableSize) return ERROR(srcSize_wrong);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ } }
+
+ if (ddict) {
+ /* we were called from ZSTD_decompress_usingDDict */
+ CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
+ } else {
+ /* this will initialize correctly with no dict if dict == NULL, so
+ * use this in all cases but ddict */
+ CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+ }
+ ZSTD_checkContinuity(dctx, dst);
+
+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
+ if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+ && (moreThan1Frame==1) ) {
+ /* at least one frame successfully completed,
+ * but following bytes are garbage :
+ * it's more likely to be a srcSize error,
+ * specifying more bytes than compressed size of frame(s).
+ * This error message replaces ERROR(prefix_unknown),
+ * which would be confusing, as the first header is actually correct.
+             * Note that one could be unlucky : it might be a corruption error instead,
+ * happening right at the place where we expect zstd magic bytes.
+ * But this is _much_ less likely than a srcSize field error. */
+ return ERROR(srcSize_wrong);
+ }
+ if (ZSTD_isError(res)) return res;
+ /* no need to bound check, ZSTD_decompressFrame already has */
+ dst = (BYTE*)dst + res;
+ dstCapacity -= res;
+ }
+ moreThan1Frame = 1;
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
+
+ return (BYTE*)dst - (BYTE*)dststart;
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+ size_t regenSize;
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ if (dctx==NULL) return ERROR(memory_allocation);
+ regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTD_DCtx dctx;
+ ZSTD_initDCtx_internal(&dctx);
+ return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
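+
+/* Editor's note, an illustrative one-shot call (buffer names hypothetical) :
+ *     size_t const dSize = ZSTD_decompress(dstBuf, dstCapacity, srcBuf, srcSize);
+ *     if (ZSTD_isError(dSize)) { report ZSTD_getErrorName(dSize); }
+ * dstCapacity must be large enough for the entire regenerated content. */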
+
+
+/*-**************************************
+* Advanced Streaming Decompression API
+* Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+ switch(dctx->stage)
+ {
+ default: /* should not happen */
+ assert(0);
+ case ZSTDds_getFrameHeaderSize:
+ case ZSTDds_decodeFrameHeader:
+ return ZSTDnit_frameHeader;
+ case ZSTDds_decodeBlockHeader:
+ return ZSTDnit_blockHeader;
+ case ZSTDds_decompressBlock:
+ return ZSTDnit_block;
+ case ZSTDds_decompressLastBlock:
+ return ZSTDnit_lastBlock;
+ case ZSTDds_checkChecksum:
+ return ZSTDnit_checksum;
+ case ZSTDds_decodeSkippableHeader:
+ case ZSTDds_skipFrame:
+ return ZSTDnit_skippableFrame;
+ }
+}
+
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+
+/** ZSTD_decompressContinue() :
+ * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
+ /* Sanity check */
+ if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */
+ if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
+
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ assert(src != NULL);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
+ if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = ZSTD_skippableHeaderSize - srcSize; /* remaining to load to get full skippable frame header */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+ if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+ memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+
+ case ZSTDds_decodeFrameHeader:
+ assert(src != NULL);
+ memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+ CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->rleSize = bp.origSize;
+ if (cBlockSize) {
+ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+ return 0;
+ }
+ /* empty block */
+ if (bp.lastBlock) {
+ if (dctx->fParams.checksumFlag) {
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* end of frame */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ }
+ return 0;
+ }
+
+ case ZSTDds_decompressLastBlock:
+ case ZSTDds_decompressBlock:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+ break;
+ case bt_rle :
+ rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
+ break;
+ case bt_reserved : /* should never happen */
+ default:
+ return ERROR(corruption_detected);
+ }
+ if (ZSTD_isError(rSize)) return rSize;
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
+ dctx->decodedSize += rSize;
+ if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+
+ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ if (dctx->decodedSize != dctx->fParams.frameContentSize) {
+ return ERROR(corruption_detected);
+ } }
+ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* ends here */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->previousDstEnd = (char*)dst + rSize;
+ }
+ return rSize;
+ }
+
+ case ZSTDds_checkChecksum:
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
+ { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
+ if (check32 != h32) return ERROR(checksum_wrong);
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+
+ case ZSTDds_decodeSkippableHeader:
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_skippableHeaderSize);
+ memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
+ case ZSTDds_skipFrame:
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+}
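+
+/* Editor's note, a bufferless streaming sketch (I/O and sizing elided) :
+ *     ZSTD_decompressBegin(dctx);
+ *     while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *         fill inBuf with exactly toRead bytes, then :
+ *         oSize = ZSTD_decompressContinue(dctx, outPtr, outCap, inBuf, toRead);
+ *         if (ZSTD_isError(oSize)) break;
+ *         outPtr += oSize;
+ *     }
+ * Each call must be fed exactly the byte count announced, as enforced above. */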
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+ return 0;
+}
+
+/*! ZSTD_loadEntropy() :
+ * dict : must point at the beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+
+ if (dictSize <= 8) return ERROR(dictionary_corrupted);
+ assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
+ dictPtr += 8; /* skip header = magic + dictID */
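+    /* Layout note : past this header, the code below expects, in order, the
+     * Huffman literals table, the Offset / MatchLength / LitLength FSE tables,
+     * 3 starting repcodes (4 bytes each), and finally the raw content that is
+     * referenced afterwards. */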
+
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+ ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+ { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
+ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+ size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+ dictPtr, dictEnd - dictPtr,
+ workspace, workspaceSize);
+ if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+ dictPtr += hSize;
+ }
+
+ { short offcodeNCount[MaxOff+1];
+ U32 offcodeMaxValue = MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
+ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ ZSTD_buildFSETable( entropy->OFTable,
+ offcodeNCount, offcodeMaxValue,
+ OF_base, OF_bits,
+ offcodeLog);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
+ if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+ ZSTD_buildFSETable( entropy->MLTable,
+ matchlengthNCount, matchlengthMaxValue,
+ ML_base, ML_bits,
+ matchlengthLog);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
+ if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+ ZSTD_buildFSETable( entropy->LLTable,
+ litlengthNCount, litlengthMaxValue,
+ LL_base, LL_bits,
+ litlengthLog);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+ if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
+ entropy->rep[i] = rep;
+ } }
+
+ return dictPtr - (const BYTE*)dict;
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
+ if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ }
+ dctx->litEntropy = dctx->fseEntropy = 1;
+
+ /* reference dictionary content */
+ return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->prefixStart = NULL;
+ dctx->virtualStart = NULL;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ CHECK_F( ZSTD_decompressBegin(dctx) );
+ if (dict && dictSize)
+ CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+ return 0;
+}
+
+
+/* ====== ZSTD_DDict ====== */
+
+struct ZSTD_DDict_s {
+ void* dictBuffer;
+ const void* dictContent;
+ size_t dictSize;
+ ZSTD_entropyDTables_t entropy;
+ U32 dictID;
+ U32 entropyPresent;
+ ZSTD_customMem cMem;
+}; /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictContent;
+}
+
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictSize;
+}
+
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+ assert(dctx != NULL);
+ if (ddict) {
+ dctx->ddictIsCold = (dctx->dictEnd != (const char*)ddict->dictContent + ddict->dictSize);
+ DEBUGLOG(4, "DDict is %s",
+ dctx->ddictIsCold ? "~cold~" : "hot!");
+ }
+ CHECK_F( ZSTD_decompressBegin(dctx) );
+ if (ddict) { /* NULL ddict is equivalent to no dictionary */
+ dctx->dictID = ddict->dictID;
+ dctx->prefixStart = ddict->dictContent;
+ dctx->virtualStart = ddict->dictContent;
+ dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+ dctx->previousDstEnd = dctx->dictEnd;
+ if (ddict->entropyPresent) {
+ dctx->litEntropy = 1;
+ dctx->fseEntropy = 1;
+ dctx->LLTptr = ddict->entropy.LLTable;
+ dctx->MLTptr = ddict->entropy.MLTable;
+ dctx->OFTptr = ddict->entropy.OFTable;
+ dctx->HUFptr = ddict->entropy.hufTable;
+ dctx->entropy.rep[0] = ddict->entropy.rep[0];
+ dctx->entropy.rep[1] = ddict->entropy.rep[1];
+ dctx->entropy.rep[2] = ddict->entropy.rep[2];
+ } else {
+ dctx->litEntropy = 0;
+ dctx->fseEntropy = 0;
+ }
+ }
+ return 0;
+}
+
+static size_t
+ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict,
+ ZSTD_dictContentType_e dictContentType)
+{
+ ddict->dictID = 0;
+ ddict->entropyPresent = 0;
+ if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+ if (ddict->dictSize < 8) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ { U32 const magic = MEM_readLE32(ddict->dictContent);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ }
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ CHECK_E( ZSTD_loadEntropy(&ddict->entropy,
+ ddict->dictContent, ddict->dictSize),
+ dictionary_corrupted );
+ ddict->entropyPresent = 1;
+ return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+ ddict->dictBuffer = NULL;
+ ddict->dictContent = dict;
+ if (!dict) dictSize = 0;
+ } else {
+ void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
+ ddict->dictBuffer = internalBuffer;
+ ddict->dictContent = internalBuffer;
+ if (!internalBuffer) return ERROR(memory_allocation);
+ memcpy(internalBuffer, dict, dictSize);
+ }
+ ddict->dictSize = dictSize;
+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+
+ /* parse dictionary content */
+ CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) );
+
+ return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem)
+{
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+ if (ddict == NULL) return NULL;
+ ddict->cMem = customMem;
+ { size_t const initResult = ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType);
+ if (ZSTD_isError(initResult)) {
+ ZSTD_freeDDict(ddict);
+ return NULL;
+ } }
+ return ddict;
+ }
+}
+
+/*! ZSTD_createDDict() :
+* Create a digested dictionary, to start decompression without startup delay.
+* `dict` content is copied inside DDict.
+* Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
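+
+/* Editor's note, a typical DDict round-trip (sketch; names hypothetical) :
+ *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ *     size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ *                                                     src, srcSize, ddict);
+ *     ZSTD_freeDDict(ddict);
+ * Creating the DDict once amortizes dictionary parsing across many frames. */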
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, to start decompression without startup delay.
+ * Dictionary content is simply referenced; it will be accessed during decompression.
+ * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* sBuffer, size_t sBufferSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ size_t const neededSpace = sizeof(ZSTD_DDict)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+ ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+ assert(sBuffer != NULL);
+ assert(dict != NULL);
+ if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
+ if (sBufferSize < neededSpace) return NULL;
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
+ memcpy(ddict+1, dict, dictSize); /* local copy */
+ dict = ddict+1;
+ }
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType) ))
+ return NULL;
+ return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = ddict->cMem;
+ ZSTD_free(ddict->dictBuffer, cMem);
+ ZSTD_free(ddict, cMem);
+ return 0;
+ }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ * Estimate amount of memory that will be needed to create a dictionary for decompression.
+ * Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within the dictionary.
+ * If @return == 0, the dictionary is not conformant with the Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return 0;
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+ return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0;
+ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary (most common case).
+ * - The frame was built with dictID intentionally removed.
+ *   The needed dictionary is then hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, frame header could not be decoded.
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use
+ * ZSTD_getFrameHeader(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+ if (ZSTD_isError(hError)) return 0;
+ return zfp.dictID;
+}
+
+
+/*! ZSTD_decompress_usingDDict() :
+* Decompression using a pre-digested Dictionary
+* Use dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict)
+{
+ /* pass content and size in case legacy frames are encountered */
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+ NULL, 0,
+ ddict);
+}
+
+
+/*=====================================
+* Streaming decompression
+*====================================*/
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createDStream");
+ return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ return ZSTD_freeDCtx(zds);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+ ZSTD_freeDDict(dctx->ddictLocal);
+ if (dict && dictSize >= 8) {
+ dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+ if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
+ } else {
+ dctx->ddictLocal = NULL;
+ }
+ dctx->ddict = dctx->ddictLocal;
+ return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
+}
+
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+
+/* ZSTD_initDStream_usingDict() :
+ * return : expected size, aka ZSTD_frameHeaderSize_prefix.
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+ zds->streamStage = zdss_init;
+ zds->noForwardProgress = 0;
+ CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
+ return ZSTD_frameHeaderSize_prefix;
+}
+
+/* note : this variant can't fail */
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+ DEBUGLOG(4, "ZSTD_initDStream");
+ return ZSTD_initDStream_usingDict(zds, NULL, 0);
+}
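+
+/* Editor's note, a minimal streaming loop (sketch; error handling trimmed) :
+ *     ZSTD_initDStream(zds);
+ *     { ZSTD_inBuffer  in  = { srcBuf, srcSize, 0 };
+ *       ZSTD_outBuffer out = { dstBuf, dstCapacity, 0 };
+ *       while (in.pos < in.size) {
+ *           size_t const hint = ZSTD_decompressStream(zds, &out, &in);
+ *           if (ZSTD_isError(hint)) break;
+ *           if (hint == 0) break;   (zero means a frame completed exactly here)
+ *     } }
+ */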
+
+/* ZSTD_initDStream_usingDDict() :
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
+ size_t const initResult = ZSTD_initDStream(dctx);
+ dctx->ddict = ddict;
+ return initResult;
+}
+
+/* ZSTD_resetDStream() :
+ * return : expected size, aka ZSTD_frameHeaderSize_prefix.
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+{
+ DEBUGLOG(4, "ZSTD_resetDStream");
+ dctx->streamStage = zdss_loadHeader;
+ dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
+ dctx->legacyVersion = 0;
+ dctx->hostageByte = 0;
+ return ZSTD_frameHeaderSize_prefix;
+}
+
+size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
+ ZSTD_DStreamParameter_e paramType, unsigned paramValue)
+{
+ if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+ switch(paramType)
+ {
+ default : return ERROR(parameter_unsupported);
+ case DStream_p_maxWindowSize :
+ DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
+ dctx->maxWindowSize = paramValue ? paramValue : (U32)(-1);
+ break;
+ }
+ return 0;
+}
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+ dctx->ddict = ddict;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+ if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+ DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
+ if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+ dctx->format = format;
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+ return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
+ return minRBSize;
+}
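+
+/* Editor's note : for an 8 MiB window, blockSize caps at ZSTD_BLOCKSIZE_MAX
+ * (128 KiB), so the minimum rolling buffer is roughly
+ * 8 MiB + 128 KiB + 2*WILDCOPY_OVERLENGTH ; a small known frameContentSize
+ * shrinks it further. */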
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ size_t const inBuffSize = blockSize; /* no block can be larger */
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable */
+ ZSTD_frameHeader zfh;
+ size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(err)) return err;
+ if (err>0) return ERROR(srcSize_wrong);
+ if (zfh.windowSize > windowSizeMax)
+ return ERROR(frameParameter_windowTooLarge);
+ return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
+
+
+/* ***** Decompression ***** */
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ memcpy(dst, src, length);
+ return length;
+}
+
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ const char* const istart = (const char*)(input->src) + input->pos;
+ const char* const iend = (const char*)(input->src) + input->size;
+ const char* ip = istart;
+ char* const ostart = (char*)(output->dst) + output->pos;
+ char* const oend = (char*)(output->dst) + output->size;
+ char* op = ostart;
+ U32 someMoreWork = 1;
+
+ DEBUGLOG(5, "ZSTD_decompressStream");
+ if (input->pos > input->size) { /* forbidden */
+ DEBUGLOG(5, "in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ return ERROR(srcSize_wrong);
+ }
+ if (output->pos > output->size) { /* forbidden */
+ DEBUGLOG(5, "out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
+ return ERROR(dstSize_tooSmall);
+ }
+ DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+
+ while (someMoreWork) {
+ switch(zds->streamStage)
+ {
+ case zdss_init :
+ DEBUGLOG(5, "stage zdss_init => transparent reset ");
+ ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
+ /* fall-through */
+
+ case zdss_loadHeader :
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+ if (zds->legacyVersion) {
+ /* legacy support is incompatible with static dctx */
+ if (zds->staticSize) return ERROR(memory_allocation);
+ { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
+ if (hint==0) zds->streamStage = zdss_init;
+ return hint;
+ } }
+#endif
+ { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
+ if (ZSTD_isError(hSize)) {
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+ U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
+ if (legacyVersion) {
+ const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
+ size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
+ DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
+ /* legacy support is incompatible with static dctx */
+ if (zds->staticSize) return ERROR(memory_allocation);
+ CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
+ zds->previousLegacyVersion, legacyVersion,
+ dict, dictSize));
+ zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
+ { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
+ if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
+ return hint;
+ } }
+#endif
+ return hSize; /* error */
+ }
+ if (hSize != 0) { /* need more input */
+ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
+ size_t const remainingInput = (size_t)(iend-ip);
+ assert(iend >= ip);
+ if (toLoad > remainingInput) { /* not enough input to load full header */
+ if (remainingInput > 0) {
+ memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+ zds->lhSize += remainingInput;
+ }
+ input->pos = input->size;
+ return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ assert(ip != NULL);
+ memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* check for single-pass mode opportunity */
+ if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
+ if (ZSTD_isError(decompressedSize)) return decompressedSize;
+                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
+ ip = istart + cSize;
+ op += decompressedSize;
+ zds->expected = 0;
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ } }
+
+ /* Consume header (see ZSTDds_decodeFrameHeader) */
+ DEBUGLOG(4, "Consume header");
+ CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
+
+ if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
+ zds->stage = ZSTDds_skipFrame;
+ } else {
+ CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
+ zds->expected = ZSTD_blockHeaderSize;
+ zds->stage = ZSTDds_decodeBlockHeader;
+ }
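+            /* note : skippable frames carry magic numbers 0x184D2A50..0x184D2A5F
+             * (hence the 0xFFFFFFF0 mask above); the 4-byte little-endian size
+             * field following the magic gives the payload length to skip */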
+
+ /* control buffer memory usage */
+ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+ (U32)(zds->fParams.windowSize >>10),
+ (U32)(zds->maxWindowSize >> 10) );
+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+ if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
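+            /* note : clamping windowSize from below keeps the buffer arithmetic
+             * valid for very small frames, while the maxWindowSize ceiling bounds
+             * memory consumption against hostile frame headers */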
+
+ /* Adapt buffer sizes to frame header instructions */
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
+ if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
+ size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
+ DEBUGLOG(4, "inBuff : from %u to %u",
+ (U32)zds->inBuffSize, (U32)neededInBuffSize);
+ DEBUGLOG(4, "outBuff : from %u to %u",
+ (U32)zds->outBuffSize, (U32)neededOutBuffSize);
+ if (zds->staticSize) { /* static DCtx */
+ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
+ assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
+ if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
+ return ERROR(memory_allocation);
+ } else {
+ ZSTD_free(zds->inBuff, zds->customMem);
+ zds->inBuffSize = 0;
+ zds->outBuffSize = 0;
+ zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
+ if (zds->inBuff == NULL) return ERROR(memory_allocation);
+ }
+ zds->inBuffSize = neededInBuffSize;
+ zds->outBuff = zds->inBuff + zds->inBuffSize;
+ zds->outBuffSize = neededOutBuffSize;
+ } }
+ zds->streamStage = zdss_read;
+ /* fall-through */
+
+ case zdss_read:
+ DEBUGLOG(5, "stage zdss_read");
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+ if (neededInSize==0) { /* end of frame */
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
+ ip, neededInSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ ip += neededInSize;
+ if (!decodedSize && !isSkipFrame) break; /* this was just a header */
+ zds->outEnd = zds->outStart + decodedSize;
+ zds->streamStage = zdss_flush;
+ break;
+ } }
+ if (ip==iend) { someMoreWork = 0; break; } /* no more input */
+ zds->streamStage = zdss_load;
+ /* fall-through */
+
+ case zdss_load:
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ size_t const toLoad = neededInSize - zds->inPos;
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t loadedSize;
+ if (isSkipFrame) {
+ loadedSize = MIN(toLoad, (size_t)(iend-ip));
+ } else {
+ if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */
+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
+ }
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ { size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
+ zds->inBuff, neededInSize);
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ zds->inPos = 0; /* input is consumed */
+ if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */
+ zds->outEnd = zds->outStart + decodedSize;
+ } }
+ zds->streamStage = zdss_flush;
+ /* fall-through */
+
+ case zdss_flush:
+ { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
+ op += flushedSize;
+ zds->outStart += flushedSize;
+ if (flushedSize == toFlushSize) { /* flush completed */
+ zds->streamStage = zdss_read;
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
+ zds->outStart = zds->outEnd = 0;
+ }
+ break;
+ } }
+ /* cannot complete flush */
+ someMoreWork = 0;
+ break;
+
+ default: return ERROR(GENERIC); /* impossible */
+ } }
+
+ /* result */
+ input->pos = (size_t)(ip - (const char*)(input->src));
+ output->pos = (size_t)(op - (char*)(output->dst));
+ if ((ip==istart) && (op==ostart)) { /* no forward progress */
+ zds->noForwardProgress ++;
+ if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+ if (op==oend) return ERROR(dstSize_tooSmall);
+ if (ip==iend) return ERROR(srcSize_wrong);
+ assert(0);
+ }
+ } else {
+ zds->noForwardProgress = 0;
+ }
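+    /* "hostage byte" mechanism : when a frame is fully decoded but some output
+     * is still buffered, one input byte is deliberately left unconsumed (see
+     * the pos-- below), so that callers looping until the input is entirely
+     * consumed keep calling ZSTD_decompressStream(); the byte is released once
+     * all output has been flushed. */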
+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+ if (!nextSrcSizeHint) { /* frame fully decoded */
+ if (zds->outEnd == zds->outStart) { /* output fully flushed */
+ if (zds->hostageByte) {
+ if (input->pos >= input->size) {
+ /* can't release hostage (not present) */
+ zds->streamStage = zdss_read;
+ return 1;
+ }
+ input->pos++; /* release hostage */
+ } /* zds->hostageByte */
+ return 0;
+ } /* zds->outEnd == zds->outStart */
+ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+                input->pos--;   /* note : pos > 0 here; otherwise it would have been impossible to finish reading the last block */
+ zds->hostageByte=1;
+ }
+ return 1;
+ } /* nextSrcSizeHint==0 */
+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
+ assert(zds->inPos <= nextSrcSizeHint);
+        nextSrcSizeHint -= zds->inPos;   /* part already loaded */
+ return nextSrcSizeHint;
+ }
+}
+
+
+size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ return ZSTD_decompressStream(dctx, output, input);
+}
+
+size_t ZSTD_decompress_generic_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+    /* ZSTD_decompress_generic() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
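+
+/* Illustrative caller sketch (not part of upstream zstd; `dctx`, `dst`, `src`
+ * and the size variables are assumed to exist, and `dstCapacity` is assumed
+ * large enough for the whole frame) :
+ *
+ *     size_t dstPos = 0, srcPos = 0;
+ *     while (srcPos < srcSize) {
+ *         size_t const hint = ZSTD_decompress_generic_simpleArgs(dctx,
+ *                                 dst, dstCapacity, &dstPos,
+ *                                 src, srcSize, &srcPos);
+ *         if (ZSTD_isError(hint)) return hint;
+ *         if (hint == 0) break;    (frame fully decoded and flushed)
+ *     }
+ */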
+
+void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
+{
+ (void)ZSTD_initDStream(dctx);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+}
diff --git a/grub-core/lib/zstd/zstd_errors.h b/grub-core/lib/zstd/zstd_errors.h
new file mode 100644
index 0000000..57533f2
--- /dev/null
+++ b/grub-core/lib/zstd/zstd_errors.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*===== dependency =====*/
+#include <stddef.h> /* size_t */
+
+
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#ifndef ZSTDERRORLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZSTDERRORLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required, but it allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#endif
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error code _values_ are pinned down only since v1.3.1.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only;
+ *          dynamic linking is not yet officially supported.
+ * note 2 : prefer relying on the enum rather than on its value whenever possible;
+ *          this is the only supported way to use the error list with versions < v1.3.1.
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+  ZSTD_error_workSpace_tooSmall = 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+    which can be compared against the enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
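+
+/* Usage sketch (illustrative only, not part of upstream) :
+ *     size_t const r = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ *     if (ZSTD_isError(r)) {
+ *         ZSTD_ErrorCode const e = ZSTD_getErrorCode(r);
+ *         if (e == ZSTD_error_dstSize_tooSmall) { enlarge dst and retry }
+ *     }
+ */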
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_ERRORS_H_398273423 */
diff --git a/grub-core/lib/zstd/zstd_internal.h b/grub-core/lib/zstd/zstd_internal.h
new file mode 100644
index 0000000..e75adfa
--- /dev/null
+++ b/grub-core/lib/zstd/zstd_internal.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* this module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least two of them,
+ * which benefit from being inlined */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "compiler.h"
+#include "mem.h"
+#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
+#endif
+#include "xxhash.h" /* XXH_reset, update, digest */
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
+
+
+/*-*************************************
+* shared macros
+***************************************/
+#undef MIN
+#undef MAX
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check, and forward the error code on failure */
+#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check, and substitute the specified error code on failure */
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTD_OPT_NUM (1<<12)
+
+#define ZSTD_REP_NUM 3 /* number of repcodes */
+#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
+static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */
+static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
+
+#define ZSTD_BLOCKHEADERSIZE 3   /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
+static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
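+/* a block header packs, across its 3 little-endian bytes :
+ *   bit  0    : Last_Block flag
+ *   bits 1-2  : blockType_e (bt_raw / bt_rle / bt_compressed / bt_reserved)
+ *   bits 3-23 : Block_Size (21 bits)
+ * (layout per the zstd format specification, RFC 8478) */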
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define DefaultMaxOff 28
+#define MaxOff 31
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1 };
+#define LL_DEFAULTNORMLOG 6 /* for static allocation */
+static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1 };
+#define ML_DEFAULTNORMLOG 6 /* for static allocation */
+static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1,-1 };
+#define OF_DEFAULTNORMLOG 5 /* for static allocation */
+static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
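+
+/* note : the *_bits tables give the number of extra raw bits carried by each
+ * literal-length / match-length / offset code, while the *_defaultNorm tables
+ * hold the normalized frequency counts prescribed by the format for the
+ * predefined FSE distributions (symbolEncodingType_e set_basic); a value of -1
+ * marks a low-probability symbol assigned a single FSE state. */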
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy() :
+ *  custom version of memcpy(), copying in 8-byte chunks :
+ *  it may write up to WILDCOPY_OVERLENGTH bytes beyond dst+length
+ *  (at least 8 bytes are written, even when length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
+
+MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* expected to be faster for decoding, but, strangely, not verified on all platforms */
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = (BYTE*)dstEnd;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
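+
+/* note : callers of both wildcopy variants must guarantee WILDCOPY_OVERLENGTH
+ * bytes of slack past the copy's end : the loops always move whole 8-byte
+ * words, so the final iteration may write beyond oend; decoding buffers are
+ * sized with this margin included. */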
+
+
+/*-*******************************************
+* Private declarations
+*********************************************/
+typedef struct seqDef_s {
+ U32 offset;
+ U16 litLength;
+ U16 matchLength;
+} seqDef;
+
+typedef struct {
+ seqDef* sequencesStart;
+ seqDef* sequences;
+ BYTE* litStart;
+ BYTE* lit;
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+ U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+ U32 longLengthPos;
+} seqStore_t;
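+
+/* note : seqStore_t accumulates the (literals, match) sequences produced by the
+ * compression-side match finders; the decoder does not use it, but it is kept
+ * here because this header must stay identical across the compress, decompress
+ * and dictBuilder modules. */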
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+/* custom memory allocation functions */
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_free(void* ptr, ZSTD_customMem customMem);
+
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ {
+# if defined(_MSC_VER) /* Visual */
+ unsigned long r=0;
+ _BitScanReverse(&r, val);
+ return (unsigned)r;
+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
+ return 31 - __builtin_clz(val);
+# else /* Software version */
+ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
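+
+/* note on the software fallback above : the shift-or cascade first smears the
+ * highest set bit downward, turning val into 2^(n+1)-1; multiplying this
+ * saturated value by the De Bruijn-style constant 0x07C4ACDD then leaves a
+ * distinct 5-bit pattern in the top bits for each possible n, which the
+ * 32-entry lookup table maps back to the bit index. */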
+
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
+} blockProperties_t;
+
+/*! ZSTD_getcBlockSize() :
+ *  Provides the size of the compressed block from the block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_CCOMMON_H_MODULE */