From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- lib/lz4/Makefile | 6 + lib/lz4/lz4_compress.c | 940 +++++++++++++++++++++++++++++++++++++++++++++++ lib/lz4/lz4_decompress.c | 720 ++++++++++++++++++++++++++++++++++++ lib/lz4/lz4defs.h | 247 +++++++++++++ lib/lz4/lz4hc_compress.c | 768 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 2681 insertions(+) create mode 100644 lib/lz4/Makefile create mode 100644 lib/lz4/lz4_compress.c create mode 100644 lib/lz4/lz4_decompress.c create mode 100644 lib/lz4/lz4defs.h create mode 100644 lib/lz4/lz4hc_compress.c (limited to 'lib/lz4') diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile new file mode 100644 index 000000000..5b42242af --- /dev/null +++ b/lib/lz4/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y += -O3 + +obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o +obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o +obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c new file mode 100644 index 000000000..90bb67994 --- /dev/null +++ b/lib/lz4/lz4_compress.c @@ -0,0 +1,940 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011 - 2016, Yann Collet. + * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 + * + * Changed for kernel usage by: + * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> + */ + +/*-************************************ + * Dependencies + **************************************/ +#include <linux/lz4.h> +#include "lz4defs.h" +#include <linux/module.h> +#include <linux/kernel.h> +#include <asm/unaligned.h> + +static const int LZ4_minLength = (MFLIMIT + 1); +static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1)); + +/*-****************************** + * Compression functions + ********************************/ +static FORCE_INLINE U32 LZ4_hash4( + U32 sequence, + tableType_t const tableType) +{ + if (tableType == byU16) + return ((sequence * 2654435761U) + >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1))); + else + return ((sequence * 2654435761U) + >> ((MINMATCH * 8) - LZ4_HASHLOG)); +} + +static FORCE_INLINE U32 LZ4_hash5( + U64 sequence, + tableType_t const tableType) +{ + const U32 hashLog = (tableType == byU16) + ? LZ4_HASHLOG + 1 + : LZ4_HASHLOG; + +#if LZ4_LITTLE_ENDIAN + static const U64 prime5bytes = 889523592379ULL; + + return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); +#else + static const U64 prime8bytes = 11400714785074694791ULL; + + return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); +#endif +} + +static FORCE_INLINE U32 LZ4_hashPosition( + const void *p, + tableType_t const tableType) +{ +#if LZ4_ARCH64 + if (tableType == byU32) + return LZ4_hash5(LZ4_read_ARCH(p), tableType); +#endif + + return LZ4_hash4(LZ4_read32(p), tableType); +} + +static void LZ4_putPositionOnHash( + const BYTE *p, + U32 h, + void *tableBase, + tableType_t const tableType, + const BYTE *srcBase) +{ + switch (tableType) { + case byPtr: + { + const BYTE **hashTable = (const BYTE **)tableBase; + + hashTable[h] = p; + return; + } + case byU32: + { + U32 *hashTable = (U32 *) tableBase; + + hashTable[h] = (U32)(p - srcBase); + return; + } + case byU16: + { + U16 *hashTable = (U16 *) tableBase; + + hashTable[h] = (U16)(p - srcBase); + return; + } + } +} + +static FORCE_INLINE void LZ4_putPosition( + const BYTE *p, + void *tableBase, + tableType_t tableType, + const BYTE *srcBase) +{ + U32 const h = LZ4_hashPosition(p, tableType); + + LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); +} + +static const BYTE *LZ4_getPositionOnHash( + U32 h, + void *tableBase, + tableType_t tableType, + const BYTE *srcBase) +{ + if (tableType == byPtr) { + const BYTE **hashTable = (const BYTE **) tableBase; + + return hashTable[h]; + } + + if (tableType == byU32) { + const U32 * const hashTable = (U32 *) tableBase; + + return hashTable[h] + srcBase; + } + + { + /* default, to ensure a return */ + const U16 * const hashTable = (U16 *) tableBase; + + return hashTable[h] + srcBase; + } +} + +static FORCE_INLINE const BYTE *LZ4_getPosition( + const BYTE *p, + void *tableBase, + tableType_t tableType, + const BYTE *srcBase) +{ + U32 const h = LZ4_hashPosition(p, tableType); + + return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); +} + + +/* + * LZ4_compress_generic() : + * inlined, to ensure branches are decided at compilation time + */ +static FORCE_INLINE int LZ4_compress_generic( + LZ4_stream_t_internal * const dictPtr, + const char * const source, + char * const dest, + const int inputSize, + const int maxOutputSize, + const limitedOutput_directive outputLimited, + const tableType_t tableType, + const dict_directive dict, + const dictIssue_directive dictIssue, + const U32 acceleration) +{ + const
BYTE *ip = (const BYTE *) source; + const BYTE *base; + const BYTE *lowLimit; + const BYTE * const lowRefLimit = ip - dictPtr->dictSize; + const BYTE * const dictionary = dictPtr->dictionary; + const BYTE * const dictEnd = dictionary + dictPtr->dictSize; + const size_t dictDelta = dictEnd - (const BYTE *)source; + const BYTE *anchor = (const BYTE *) source; + const BYTE * const iend = ip + inputSize; + const BYTE * const mflimit = iend - MFLIMIT; + const BYTE * const matchlimit = iend - LASTLITERALS; + + BYTE *op = (BYTE *) dest; + BYTE * const olimit = op + maxOutputSize; + + U32 forwardH; + size_t refDelta = 0; + + /* Init conditions */ + if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { + /* Unsupported inputSize, too large (or negative) */ + return 0; + } + + switch (dict) { + case noDict: + default: + base = (const BYTE *)source; + lowLimit = (const BYTE *)source; + break; + case withPrefix64k: + base = (const BYTE *)source - dictPtr->currentOffset; + lowLimit = (const BYTE *)source - dictPtr->dictSize; + break; + case usingExtDict: + base = (const BYTE *)source - dictPtr->currentOffset; + lowLimit = (const BYTE *)source; + break; + } + + if ((tableType == byU16) + && (inputSize >= LZ4_64Klimit)) { + /* Size too large (not within 64K limit) */ + return 0; + } + + if (inputSize < LZ4_minLength) { + /* Input too small, no compression (all literals) */ + goto _last_literals; + } + + /* First Byte */ + LZ4_putPosition(ip, dictPtr->hashTable, tableType, base); + ip++; + forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for ( ; ; ) { + const BYTE *match; + BYTE *token; + + /* Find a match */ + { + const BYTE *forwardIp = ip; + unsigned int step = 1; + unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER; + + do { + U32 const h = forwardH; + + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_SKIPTRIGGER); + + if (unlikely(forwardIp > mflimit)) + goto _last_literals; + + match = LZ4_getPositionOnHash(h, + dictPtr->hashTable, + tableType, base); + + if (dict == usingExtDict) { + if (match < (const BYTE *)source) { + refDelta = dictDelta; + lowLimit = dictionary; + } else { + refDelta = 0; + lowLimit = (const BYTE *)source; + } } + + forwardH = LZ4_hashPosition(forwardIp, + tableType); + + LZ4_putPositionOnHash(ip, h, dictPtr->hashTable, + tableType, base); + } while (((dictIssue == dictSmall) + ? (match < lowRefLimit) + : 0) + || ((tableType == byU16) + ? 
0 + : (match + MAX_DISTANCE < ip)) + || (LZ4_read32(match + refDelta) + != LZ4_read32(ip))); + } + + /* Catch up */ + while (((ip > anchor) & (match + refDelta > lowLimit)) + && (unlikely(ip[-1] == match[refDelta - 1]))) { + ip--; + match--; + } + + /* Encode Literals */ + { + unsigned const int litLength = (unsigned int)(ip - anchor); + + token = op++; + + if ((outputLimited) && + /* Check output buffer overflow */ + (unlikely(op + litLength + + (2 + 1 + LASTLITERALS) + + (litLength / 255) > olimit))) + return 0; + + if (litLength >= RUN_MASK) { + int len = (int)litLength - RUN_MASK; + + *token = (RUN_MASK << ML_BITS); + + for (; len >= 255; len -= 255) + *op++ = 255; + *op++ = (BYTE)len; + } else + *token = (BYTE)(litLength << ML_BITS); + + /* Copy Literals */ + LZ4_wildCopy(op, anchor, op + litLength); + op += litLength; + } + +_next_match: + /* Encode Offset */ + LZ4_writeLE16(op, (U16)(ip - match)); + op += 2; + + /* Encode MatchLength */ + { + unsigned int matchCode; + + if ((dict == usingExtDict) + && (lowLimit == dictionary)) { + const BYTE *limit; + + match += refDelta; + limit = ip + (dictEnd - match); + + if (limit > matchlimit) + limit = matchlimit; + + matchCode = LZ4_count(ip + MINMATCH, + match + MINMATCH, limit); + + ip += MINMATCH + matchCode; + + if (ip == limit) { + unsigned const int more = LZ4_count(ip, + (const BYTE *)source, + matchlimit); + + matchCode += more; + ip += more; + } + } else { + matchCode = LZ4_count(ip + MINMATCH, + match + MINMATCH, matchlimit); + ip += MINMATCH + matchCode; + } + + if (outputLimited && + /* Check output buffer overflow */ + (unlikely(op + + (1 + LASTLITERALS) + + (matchCode >> 8) > olimit))) + return 0; + + if (matchCode >= ML_MASK) { + *token += ML_MASK; + matchCode -= ML_MASK; + LZ4_write32(op, 0xFFFFFFFF); + + while (matchCode >= 4 * 255) { + op += 4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4 * 255; + } + + op += matchCode / 255; + *op++ = (BYTE)(matchCode % 255); + } else + *token += (BYTE)(matchCode); + } + + anchor = ip; + + /* Test end of chunk */ + if (ip > mflimit) + break; + + /* Fill table */ + LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, dictPtr->hashTable, + tableType, base); + + if (dict == usingExtDict) { + if (match < (const BYTE *)source) { + refDelta = dictDelta; + lowLimit = dictionary; + } else { + refDelta = 0; + lowLimit = (const BYTE *)source; + } + } + + LZ4_putPosition(ip, dictPtr->hashTable, tableType, base); + + if (((dictIssue == dictSmall) ? 
(match >= lowRefLimit) : 1) + && (match + MAX_DISTANCE >= ip) + && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) { + token = op++; + *token = 0; + goto _next_match; + } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { + size_t const lastRun = (size_t)(iend - anchor); + + if ((outputLimited) && + /* Check output buffer overflow */ + ((op - (BYTE *)dest) + lastRun + 1 + + ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize)) + return 0; + + if (lastRun >= RUN_MASK) { + size_t accumulator = lastRun - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for (; accumulator >= 255; accumulator -= 255) + *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRun << ML_BITS); + } + + LZ4_memcpy(op, anchor, lastRun); + + op += lastRun; + } + + /* End */ + return (int) (((char *)op) - dest); +} + +static int LZ4_compress_fast_extState( + void *state, + const char *source, + char *dest, + int inputSize, + int maxOutputSize, + int acceleration) +{ + LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse; +#if LZ4_ARCH64 + const tableType_t tableType = byU32; +#else + const tableType_t tableType = byPtr; +#endif + + LZ4_resetStream((LZ4_stream_t *)state); + + if (acceleration < 1) + acceleration = LZ4_ACCELERATION_DEFAULT; + + if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) { + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(ctx, source, + dest, inputSize, 0, + noLimit, byU16, noDict, + noDictIssue, acceleration); + else + return LZ4_compress_generic(ctx, source, + dest, inputSize, 0, + noLimit, tableType, noDict, + noDictIssue, acceleration); + } else { + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(ctx, source, + dest, inputSize, + maxOutputSize, limitedOutput, byU16, noDict, + noDictIssue, acceleration); + else + return LZ4_compress_generic(ctx, source, + dest, inputSize, + maxOutputSize, limitedOutput, tableType, noDict, + noDictIssue, acceleration); + } +} + +int LZ4_compress_fast(const char *source, char *dest, int inputSize, + int maxOutputSize, int acceleration, void *wrkmem) +{ + return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize, + maxOutputSize, acceleration); +} +EXPORT_SYMBOL(LZ4_compress_fast); + +int LZ4_compress_default(const char *source, char *dest, int inputSize, + int maxOutputSize, void *wrkmem) +{ + return LZ4_compress_fast(source, dest, inputSize, + maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem); +} +EXPORT_SYMBOL(LZ4_compress_default); + +/*-****************************** + * *_destSize() variant + ********************************/ +static int LZ4_compress_destSize_generic( + LZ4_stream_t_internal * const ctx, + const char * const src, + char * const dst, + int * const srcSizePtr, + const int targetDstSize, + const tableType_t tableType) +{ + const BYTE *ip = (const BYTE *) src; + const BYTE *base = (const BYTE *) src; + const BYTE *lowLimit = (const BYTE *) src; + const BYTE *anchor = ip; + const BYTE * const iend = ip + *srcSizePtr; + const BYTE * const mflimit = iend - MFLIMIT; + const BYTE * const matchlimit = iend - LASTLITERALS; + + BYTE *op = (BYTE *) dst; + BYTE * const oend = op + targetDstSize; + BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */ + - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */; + BYTE * const oMaxMatch = op + targetDstSize + - (LASTLITERALS + 1 /* token */); + BYTE * const oMaxSeq = oMaxLit - 1 /* token */; + + U32 forwardH; + + /* Init conditions */ + /* Impossible to 
store anything */ + if (targetDstSize < 1) + return 0; + /* Unsupported input size, too large (or negative) */ + if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) + return 0; + /* Size too large (not within 64K limit) */ + if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit)) + return 0; + /* Input too small, no compression (all literals) */ + if (*srcSizePtr < LZ4_minLength) + goto _last_literals; + + /* First Byte */ + *srcSizePtr = 0; + LZ4_putPosition(ip, ctx->hashTable, tableType, base); + ip++; forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for ( ; ; ) { + const BYTE *match; + BYTE *token; + + /* Find a match */ + { + const BYTE *forwardIp = ip; + unsigned int step = 1; + unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER; + + do { + U32 h = forwardH; + + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_SKIPTRIGGER); + + if (unlikely(forwardIp > mflimit)) + goto _last_literals; + + match = LZ4_getPositionOnHash(h, ctx->hashTable, + tableType, base); + forwardH = LZ4_hashPosition(forwardIp, + tableType); + LZ4_putPositionOnHash(ip, h, + ctx->hashTable, tableType, + base); + + } while (((tableType == byU16) + ? 0 + : (match + MAX_DISTANCE < ip)) + || (LZ4_read32(match) != LZ4_read32(ip))); + } + + /* Catch up */ + while ((ip > anchor) + && (match > lowLimit) + && (unlikely(ip[-1] == match[-1]))) { + ip--; + match--; + } + + /* Encode Literal length */ + { + unsigned int litLength = (unsigned int)(ip - anchor); + + token = op++; + if (op + ((litLength + 240) / 255) + + litLength > oMaxLit) { + /* Not enough space for a last match */ + op--; + goto _last_literals; + } + if (litLength >= RUN_MASK) { + unsigned int len = litLength - RUN_MASK; + *token = (RUN_MASK << ML_BITS); + for (; len >= 255; len -= 255) + *op++ = 255; + *op++ = (BYTE)len; + } else + *token = (BYTE)(litLength << ML_BITS); + + /* Copy Literals */ + LZ4_wildCopy(op, anchor, op + litLength); + op += litLength; + } + +_next_match: + /* Encode Offset */ + LZ4_writeLE16(op, (U16)(ip - match)); op += 2; + + /* Encode MatchLength */ + { + size_t matchLength = LZ4_count(ip + MINMATCH, + match + MINMATCH, matchlimit); + + if (op + ((matchLength + 240)/255) > oMaxMatch) { + /* Match description too long : reduce it */ + matchLength = (15 - 1) + (oMaxMatch - op) * 255; + } + ip += MINMATCH + matchLength; + + if (matchLength >= ML_MASK) { + *token += ML_MASK; + matchLength -= ML_MASK; + while (matchLength >= 255) { + matchLength -= 255; + *op++ = 255; + } + *op++ = (BYTE)matchLength; + } else + *token += (BYTE)(matchLength); + } + + anchor = ip; + + /* Test end of block */ + if (ip > mflimit) + break; + if (op > oMaxSeq) + break; + + /* Fill table */ + LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); + LZ4_putPosition(ip, ctx->hashTable, tableType, base); + + if ((match + MAX_DISTANCE >= ip) + && (LZ4_read32(match) == LZ4_read32(ip))) { + token = op++; *token = 0; + goto _next_match; + } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { + size_t lastRunSize = (size_t)(iend - anchor); + + if (op + 1 /* token */ + + ((lastRunSize + 240) / 255) /* litLength */ + + lastRunSize /* literals */ > oend) { + /* adapt lastRunSize to fill 'dst' */ + lastRunSize = (oend - op) - 1; + lastRunSize -= (lastRunSize + 240) / 255; + } + ip = anchor + lastRunSize; + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + + *op++ =
RUN_MASK << ML_BITS; + for (; accumulator >= 255; accumulator -= 255) + *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + LZ4_memcpy(op, anchor, lastRunSize); + op += lastRunSize; + } + + /* End */ + *srcSizePtr = (int) (((const char *)ip) - src); + + return (int) (((char *)op) - dst); +} + +static int LZ4_compress_destSize_extState( + LZ4_stream_t *state, + const char *src, + char *dst, + int *srcSizePtr, + int targetDstSize) +{ +#if LZ4_ARCH64 + const tableType_t tableType = byU32; +#else + const tableType_t tableType = byPtr; +#endif + + LZ4_resetStream(state); + + if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) { + /* compression success is guaranteed */ + return LZ4_compress_fast_extState( + state, src, dst, *srcSizePtr, + targetDstSize, 1); + } else { + if (*srcSizePtr < LZ4_64Klimit) + return LZ4_compress_destSize_generic( + &state->internal_donotuse, + src, dst, srcSizePtr, + targetDstSize, byU16); + else + return LZ4_compress_destSize_generic( + &state->internal_donotuse, + src, dst, srcSizePtr, + targetDstSize, tableType); + } +} + + +int LZ4_compress_destSize( + const char *src, + char *dst, + int *srcSizePtr, + int targetDstSize, + void *wrkmem) +{ + return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr, + targetDstSize); +} +EXPORT_SYMBOL(LZ4_compress_destSize); + +/*-****************************** + * Streaming functions + ********************************/ +void LZ4_resetStream(LZ4_stream_t *LZ4_stream) +{ + memset(LZ4_stream, 0, sizeof(LZ4_stream_t)); +} + +int LZ4_loadDict(LZ4_stream_t *LZ4_dict, + const char *dictionary, int dictSize) +{ + LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse; + const BYTE *p = (const BYTE *)dictionary; + const BYTE * const dictEnd = p + dictSize; + const BYTE *base; + + if ((dict->initCheck) + || (dict->currentOffset > 1 * GB)) { + /* Uninitialized structure, or reuse overflow */ + LZ4_resetStream(LZ4_dict); + } + + if (dictSize < (int)HASH_UNIT) { + dict->dictionary = NULL; + dict->dictSize = 0; + return 0; + } + + if ((dictEnd - p) > 64 * KB) + p = dictEnd - 64 * KB; + dict->currentOffset += 64 * KB; + base = p - dict->currentOffset; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->currentOffset += dict->dictSize; + + while (p <= dictEnd - HASH_UNIT) { + LZ4_putPosition(p, dict->hashTable, byU32, base); + p += 3; + } + + return dict->dictSize; +} +EXPORT_SYMBOL(LZ4_loadDict); + +static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, + const BYTE *src) +{ + if ((LZ4_dict->currentOffset > 0x80000000) || + ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { + /* address space overflow */ + /* rescale hash table */ + U32 const delta = LZ4_dict->currentOffset - 64 * KB; + const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; + int i; + + for (i = 0; i < LZ4_HASH_SIZE_U32; i++) { + if (LZ4_dict->hashTable[i] < delta) + LZ4_dict->hashTable[i] = 0; + else + LZ4_dict->hashTable[i] -= delta; + } + LZ4_dict->currentOffset = 64 * KB; + if (LZ4_dict->dictSize > 64 * KB) + LZ4_dict->dictSize = 64 * KB; + LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; + } +} + +int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize) +{ + LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse; + const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize; + + if ((U32)dictSize > 64 * KB) { + /* useless to define a dictionary > 64 * KB */ + dictSize = 64 * KB; + } + if ((U32)dictSize > dict->dictSize) + dictSize = dict->dictSize; + + memmove(safeBuffer, previousDictEnd - dictSize, dictSize); + + dict->dictionary = (const BYTE *)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} +EXPORT_SYMBOL(LZ4_saveDict); + +int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source, + char *dest, int inputSize, int maxOutputSize, int acceleration) +{ + LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse; + const BYTE * const dictEnd = streamPtr->dictionary +
+ streamPtr->dictSize; + + const BYTE *smallest = (const BYTE *) source; + + if (streamPtr->initCheck) { + /* Uninitialized structure detected */ + return 0; + } + + if ((streamPtr->dictSize > 0) && (smallest > dictEnd)) + smallest = dictEnd; + + LZ4_renormDictT(streamPtr, smallest); + + if (acceleration < 1) + acceleration = LZ4_ACCELERATION_DEFAULT; + + /* Check overlapping input/dictionary space */ + { + const BYTE *sourceEnd = (const BYTE *) source + inputSize; + + if ((sourceEnd > streamPtr->dictionary) + && (sourceEnd < dictEnd)) { + streamPtr->dictSize = (U32)(dictEnd - sourceEnd); + if (streamPtr->dictSize > 64 * KB) + streamPtr->dictSize = 64 * KB; + if (streamPtr->dictSize < 4) + streamPtr->dictSize = 0; + streamPtr->dictionary = dictEnd - streamPtr->dictSize; + } + } + + /* prefix mode : source data follows dictionary */ + if (dictEnd == (const BYTE *)source) { + int result; + + if ((streamPtr->dictSize < 64 * KB) && + (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic( + streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + withPrefix64k, dictSmall, acceleration); + } else { + result = LZ4_compress_generic( + streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + withPrefix64k, noDictIssue, acceleration); + } + streamPtr->dictSize += (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } + + /* external dictionary mode */ + { + int result; + + if ((streamPtr->dictSize < 64 * KB) && + (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic( + streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + usingExtDict, dictSmall, acceleration); + } else { + result = LZ4_compress_generic( + streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + usingExtDict, noDictIssue, acceleration); + } + streamPtr->dictionary = (const BYTE *)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } +} +EXPORT_SYMBOL(LZ4_compress_fast_continue); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("LZ4 compressor"); diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c new file mode 100644 index 000000000..59fe69a63 --- /dev/null +++ b/lib/lz4/lz4_decompress.c @@ -0,0 +1,720 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011 - 2016, Yann Collet. + * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 + * + * Changed for kernel usage by: + * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> + */ + +/*-************************************ + * Dependencies + **************************************/ +#include <linux/lz4.h> +#include "lz4defs.h" +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <asm/unaligned.h> + +/*-***************************** + * Decompression functions + *******************************/ + +#define DEBUGLOG(l, ...) {} /* disabled */ + +#ifndef assert +#define assert(condition) ((void)0) +#endif + +/* + * LZ4_decompress_generic() : + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. + * Note that it is important for performance that this function really get inlined, + * in order to remove useless branches during compilation optimization. + */ +static FORCE_INLINE int LZ4_decompress_generic( + const char * const src, + char * const dst, + int srcSize, + /* + * If endOnInput == endOnInputSize, + * this value is `dstCapacity` + */ + int outputSize, + /* endOnOutputSize, endOnInputSize */ + endCondition_directive endOnInput, + /* full, partial */ + earlyEnd_directive partialDecoding, + /* noDict, withPrefix64k, usingExtDict */ + dict_directive dict, + /* always <= dst, == dst when no prefix */ + const BYTE * const lowPrefix, + /* only if dict == usingExtDict */ + const BYTE * const dictStart, + /* note : = 0 if noDict */ + const size_t dictSize + ) +{ + const BYTE *ip = (const BYTE *) src; + const BYTE * const iend = ip + srcSize; + + BYTE *op = (BYTE *) dst; + BYTE * const oend = op + outputSize; + BYTE *cpy; + + const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; + static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4}; + static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3}; + + const int safeDecode = (endOnInput == endOnInputSize); + const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); + + /* Set up the "end" pointers for the shortcut. */ + const BYTE *const shortiend = iend - + (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/; + const BYTE *const shortoend = oend - + (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/; + + DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__, + srcSize, outputSize); + + /* Special cases */ + assert(lowPrefix <= op); + assert(src != NULL); + + /* Empty output buffer */ + if ((endOnInput) && (unlikely(outputSize == 0))) + return ((srcSize == 1) && (*ip == 0)) ? 0 : -1; + + if ((!endOnInput) && (unlikely(outputSize == 0))) + return (*ip == 0 ?
1 : -1); + + if ((endOnInput) && unlikely(srcSize == 0)) + return -1; + + /* Main Loop : decode sequences */ + while (1) { + size_t length; + const BYTE *match; + size_t offset; + + /* get literal length */ + unsigned int const token = *ip++; + length = token>>ML_BITS; + + /* ip < iend before the increment */ + assert(!endOnInput || ip <= iend); + + /* + * A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there is enough + * space, enter the shortcut and copy 16 bytes on behalf + * of the literals (in the fast mode, only 8 bytes can be + * safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes + * in a similar manner; but we ensure that there's enough + * space in the output for those 18 bytes earlier, upon + * entering the shortcut (in other words, there is a + * combined check for both stages). + * + * The & in the likely() below is intentionally not && so that + * some compilers can produce better parallelized runtime code + */ + if ((endOnInput ? length != RUN_MASK : length <= 8) + /* + * strictly "less than" on input, to re-enter + * the loop with at least one byte + */ + && likely((endOnInput ? ip < shortiend : 1) & + (op <= shortoend))) { + /* Copy the literals */ + LZ4_memcpy(op, ip, endOnInput ? 16 : 8); + op += length; ip += length; + + /* + * The second stage: + * prepare for match copying, decode full info. + * If it doesn't work out, the info won't be wasted. + */ + length = token & ML_MASK; /* match length */ + offset = LZ4_readLE16(ip); + ip += 2; + match = op - offset; + assert(match <= op); /* check overflow */ + + /* Do not deal with overlapping matches. */ + if ((length != ML_MASK) && + (offset >= 8) && + (dict == withPrefix64k || match >= lowPrefix)) { + /* Copy the match. */ + LZ4_memcpy(op + 0, match + 0, 8); + LZ4_memcpy(op + 8, match + 8, 8); + LZ4_memcpy(op + 16, match + 16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* + * The second stage didn't work out, but the info + * is ready. Propel it right to the point of match + * copying. + */ + goto _copy_match; + } + + /* decode literal length */ + if (length == RUN_MASK) { + unsigned int s; + + if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) { + /* overflow detection */ + goto _output_error; + } + do { + s = *ip++; + length += s; + } while (likely(endOnInput + ? 
ip < iend - RUN_MASK + : 1) & (s == 255)); + + if ((safeDecode) + && unlikely((uptrval)(op) + + length < (uptrval)(op))) { + /* overflow detection */ + goto _output_error; + } + if ((safeDecode) + && unlikely((uptrval)(ip) + + length < (uptrval)(ip))) { + /* overflow detection */ + goto _output_error; + } + } + + /* copy literals */ + cpy = op + length; + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + + if (((endOnInput) && ((cpy > oend - MFLIMIT) + || (ip + length > iend - (2 + 1 + LASTLITERALS)))) + || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) { + if (partialDecoding) { + if (cpy > oend) { + /* + * Partial decoding : + * stop in the middle of literal segment + */ + cpy = oend; + length = oend - op; + } + if ((endOnInput) + && (ip + length > iend)) { + /* + * Error : + * read attempt beyond + * end of input buffer + */ + goto _output_error; + } + } else { + if ((!endOnInput) + && (cpy != oend)) { + /* + * Error : + * block decoding must + * stop exactly there + */ + goto _output_error; + } + if ((endOnInput) + && ((ip + length != iend) + || (cpy > oend))) { + /* + * Error : + * input must be consumed + */ + goto _output_error; + } + } + + /* + * supports overlapping memory regions; only matters + * for in-place decompression scenarios + */ + LZ4_memmove(op, ip, length); + ip += length; + op += length; + + /* Necessarily EOF when !partialDecoding. + * When partialDecoding, it is EOF if we've either + * filled the output buffer or + * can't proceed with reading an offset for following match. + */ + if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2))) + break; + } else { + /* may overwrite up to WILDCOPYLENGTH beyond cpy */ + LZ4_wildCopy(op, ip, cpy); + ip += length; + op = cpy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); + ip += 2; + match = op - offset; + + /* get matchlength */ + length = token & ML_MASK; + +_copy_match: + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { + /* Error : offset outside buffers */ + goto _output_error; + } + + /* costs ~1%; silence an msan warning when offset == 0 */ + /* + * note : when partialDecoding, there is no guarantee that + * at least 4 bytes remain available in output buffer + */ + if (!partialDecoding) { + assert(oend > op); + assert(oend - op >= 4); + + LZ4_write32(op, (U32)offset); + } + + if (length == ML_MASK) { + unsigned int s; + + do { + s = *ip++; + + if ((endOnInput) && (ip > iend - LASTLITERALS)) + goto _output_error; + + length += s; + } while (s == 255); + + if ((safeDecode) + && unlikely( + (uptrval)(op) + length < (uptrval)op)) { + /* overflow detection */ + goto _output_error; + } + } + + length += MINMATCH; + + /* match starting within external dictionary */ + if ((dict == usingExtDict) && (match < lowPrefix)) { + if (unlikely(op + length > oend - LASTLITERALS)) { + /* doesn't respect parsing restriction */ + if (!partialDecoding) + goto _output_error; + length = min(length, (size_t)(oend - op)); + } + + if (length <= (size_t)(lowPrefix - match)) { + /* + * match fits entirely within external + * dictionary : just copy + */ + memmove(op, dictEnd - (lowPrefix - match), + length); + op += length; + } else { + /* + * match stretches into both external + * dictionary and current block + */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + + LZ4_memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { + /* overlap copy */ + BYTE * const endOfMatch = op + restSize; + const BYTE *copyFrom = 
lowPrefix; + + while (op < endOfMatch) + *op++ = *copyFrom++; + } else { + LZ4_memcpy(op, lowPrefix, restSize); + op += restSize; + } + } + continue; + } + + /* copy match within block */ + cpy = op + length; + + /* + * partialDecoding : + * may not respect endBlock parsing restrictions + */ + assert(op <= oend); + if (partialDecoding && + (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) { + size_t const mlen = min(length, (size_t)(oend - op)); + const BYTE * const matchEnd = match + mlen; + BYTE * const copyEnd = op + mlen; + + if (matchEnd > op) { + /* overlap copy */ + while (op < copyEnd) + *op++ = *match++; + } else { + LZ4_memcpy(op, match, mlen); + } + op = copyEnd; + if (op == oend) + break; + continue; + } + + if (unlikely(offset < 8)) { + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += inc32table[offset]; + LZ4_memcpy(op + 4, match, 4); + match -= dec64table[offset]; + } else { + LZ4_copy8(op, match); + match += 8; + } + + op += 8; + + if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) { + BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1); + + if (cpy > oend - LASTLITERALS) { + /* + * Error : last LASTLITERALS bytes + * must be literals (uncompressed) + */ + goto _output_error; + } + + if (op < oCopyLimit) { + LZ4_wildCopy(op, match, oCopyLimit); + match += oCopyLimit - op; + op = oCopyLimit; + } + while (op < cpy) + *op++ = *match++; + } else { + LZ4_copy8(op, match); + if (length > 16) + LZ4_wildCopy(op + 8, match + 8, cpy); + } + op = cpy; /* wildcopy correction */ + } + + /* end of decoding */ + if (endOnInput) { + /* Nb of output bytes decoded */ + return (int) (((char *)op) - dst); + } else { + /* Nb of input bytes read */ + return (int) (((const char *)ip) - src); + } + + /* Overflow error detected */ +_output_error: + return (int) (-(((const char *)ip) - src)) - 1; +} + +int LZ4_decompress_safe(const char *source, char *dest, + int compressedSize, int maxDecompressedSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxDecompressedSize, + endOnInputSize, decode_full_block, + noDict, (BYTE *)dest, NULL, 0); +} + +int LZ4_decompress_safe_partial(const char *src, char *dst, + int compressedSize, int targetOutputSize, int dstCapacity) +{ + dstCapacity = min(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, + endOnInputSize, partial_decode, + noDict, (BYTE *)dst, NULL, 0); +} + +int LZ4_decompress_fast(const char *source, char *dest, int originalSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, decode_full_block, + withPrefix64k, + (BYTE *)dest - 64 * KB, NULL, 0); +} + +/* ===== Instantiate a few more decoding cases, used more than once. 
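+ * (Each wrapper below simply pins the directive arguments of + * LZ4_decompress_generic() (end condition, dict mode, prefix base) so the + * force-inlined generic body is specialized at compile time for one + * dictionary layout.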
===== */ + +static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest, + int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + withPrefix64k, + (BYTE *)dest - 64 * KB, NULL, 0); +} + +static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest, + int compressedSize, + int maxOutputSize, + size_t prefixSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + noDict, + (BYTE *)dest - prefixSize, NULL, 0); +} + +static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + usingExtDict, (BYTE *)dest, + (const BYTE *)dictStart, dictSize); +} + +static int LZ4_decompress_fast_extDict(const char *source, char *dest, + int originalSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + 0, originalSize, + endOnOutputSize, decode_full_block, + usingExtDict, (BYTE *)dest, + (const BYTE *)dictStart, dictSize); +} + +/* + * The "double dictionary" mode, for use with e.g. ring buffers: the first part + * of the dictionary is passed as prefix, and the second via dictStart + dictSize. + * These routines are used only once, in LZ4_decompress_*_continue(). + */ +static FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + size_t prefixSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, + usingExtDict, (BYTE *)dest - prefixSize, + (const BYTE *)dictStart, dictSize); +} + +static FORCE_INLINE +int LZ4_decompress_fast_doubleDict(const char *source, char *dest, + int originalSize, size_t prefixSize, + const void *dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, + 0, originalSize, + endOnOutputSize, decode_full_block, + usingExtDict, (BYTE *)dest - prefixSize, + (const BYTE *)dictStart, dictSize); +} + +/* ===== streaming decompression functions ===== */ + +int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, + const char *dictionary, int dictSize) +{ + LZ4_streamDecode_t_internal *lz4sd = + &LZ4_streamDecode->internal_donotuse; + + lz4sd->prefixSize = (size_t) dictSize; + lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize; + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/* + * *_continue() : + * These decoding functions allow decompression of multiple blocks + * in "streaming" mode. + * Previously decoded blocks must still be available at the memory + * position where they were decoded. + * If it's not possible, save the relevant part of + * decoded data into a safe buffer, + * and indicate where it stands using LZ4_setStreamDecode() + */ +int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, + const char *source, char *dest, int compressedSize, int maxOutputSize) +{ + LZ4_streamDecode_t_internal *lz4sd = + &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixSize == 0) { + /* The first call, no dictionary yet. 
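		 * The block is decoded with no prefix; on success the decoded
		 * bytes become the prefix (prefixSize/prefixEnd) used by the
		 * following calls. A typical caller (illustrative sketch only,
		 * decoding consecutive blocks into one contiguous buffer):
		 *
		 *	LZ4_streamDecode_t sd;
		 *	LZ4_setStreamDecode(&sd, NULL, 0);
		 *	n = LZ4_decompress_safe_continue(&sd, src, dst + pos,
		 *			srcLen, dstCapacity - pos);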
*/ + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_safe(source, dest, + compressedSize, maxOutputSize); + if (result <= 0) + return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE *)dest + result; + } else if (lz4sd->prefixEnd == (BYTE *)dest) { + /* They're rolling the current segment. */ + if (lz4sd->prefixSize >= 64 * KB - 1) + result = LZ4_decompress_safe_withPrefix64k(source, dest, + compressedSize, maxOutputSize); + else if (lz4sd->extDictSize == 0) + result = LZ4_decompress_safe_withSmallPrefix(source, + dest, compressedSize, maxOutputSize, + lz4sd->prefixSize); + else + result = LZ4_decompress_safe_doubleDict(source, dest, + compressedSize, maxOutputSize, + lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) + return result; + lz4sd->prefixSize += result; + lz4sd->prefixEnd += result; + } else { + /* + * The buffer wraps around, or they're + * switching to another buffer. + */ + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_safe_forceExtDict(source, dest, + compressedSize, maxOutputSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) + return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE *)dest + result; + } + + return result; +} + +int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, + const char *source, char *dest, int originalSize) +{ + LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixSize == 0) { + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_fast(source, dest, originalSize); + if (result <= 0) + return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE *)dest + originalSize; + } else if (lz4sd->prefixEnd == (BYTE *)dest) { + if (lz4sd->prefixSize >= 64 * KB - 1 || + lz4sd->extDictSize == 0) + result = LZ4_decompress_fast(source, dest, + originalSize); + else + result = LZ4_decompress_fast_doubleDict(source, dest, + originalSize, lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) + return result; + lz4sd->prefixSize += originalSize; + lz4sd->prefixEnd += originalSize; + } else { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_fast_extDict(source, dest, + originalSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) + return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE *)dest + originalSize; + } + return result; +} + +int LZ4_decompress_safe_usingDict(const char *source, char *dest, + int compressedSize, int maxOutputSize, + const char *dictStart, int dictSize) +{ + if (dictSize == 0) + return LZ4_decompress_safe(source, dest, + compressedSize, maxOutputSize); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 * KB - 1) + return LZ4_decompress_safe_withPrefix64k(source, dest, + compressedSize, maxOutputSize); + return LZ4_decompress_safe_withSmallPrefix(source, dest, + compressedSize, maxOutputSize, dictSize); + } + return LZ4_decompress_safe_forceExtDict(source, dest, + compressedSize, maxOutputSize, dictStart, dictSize); +} + +int LZ4_decompress_fast_usingDict(const char *source, char *dest, + int originalSize, + const char *dictStart, int dictSize) +{ + if (dictSize == 0 || dictStart + dictSize == dest) + return LZ4_decompress_fast(source, dest, originalSize); + + return LZ4_decompress_fast_extDict(source, dest, originalSize, + 
dictStart, dictSize); +} + +#ifndef STATIC +EXPORT_SYMBOL(LZ4_decompress_safe); +EXPORT_SYMBOL(LZ4_decompress_safe_partial); +EXPORT_SYMBOL(LZ4_decompress_fast); +EXPORT_SYMBOL(LZ4_setStreamDecode); +EXPORT_SYMBOL(LZ4_decompress_safe_continue); +EXPORT_SYMBOL(LZ4_decompress_fast_continue); +EXPORT_SYMBOL(LZ4_decompress_safe_usingDict); +EXPORT_SYMBOL(LZ4_decompress_fast_usingDict); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("LZ4 decompressor"); +#endif diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h new file mode 100644 index 000000000..330aa539b --- /dev/null +++ b/lib/lz4/lz4defs.h @@ -0,0 +1,247 @@ +#ifndef __LZ4DEFS_H__ +#define __LZ4DEFS_H__ + +/* + * lz4defs.h -- common and architecture specific defines for the kernel usage + + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011-2016, Yann Collet. + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 + * + * Changed for kernel usage by: + * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> + */ + +#include <asm/unaligned.h> + +#include <linux/bitops.h> +#include <linux/string.h> /* memset, memcpy */ + +#define FORCE_INLINE __always_inline + +/*-************************************ + * Basic Types + **************************************/ +#include <linux/types.h> + +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +typedef uintptr_t uptrval; + +/*-************************************ + * Architecture specifics + **************************************/ +#if defined(CONFIG_64BIT) +#define LZ4_ARCH64 1 +#else +#define LZ4_ARCH64 0 +#endif + +#if defined(__LITTLE_ENDIAN) +#define LZ4_LITTLE_ENDIAN 1 +#else +#define LZ4_LITTLE_ENDIAN 0 +#endif + +/*-************************************ + * Constants + **************************************/ +#define MINMATCH 4 + +#define WILDCOPYLENGTH 8 +#define LASTLITERALS 5 +#define MFLIMIT (WILDCOPYLENGTH + MINMATCH) +/* + * ensure it's possible to write 2 x wildcopyLength + * without overflowing output buffer + */ +#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH) + +/* Increase this value ==> compression run slower on incompressible data */ +#define LZ4_SKIPTRIGGER 6 + +#define HASH_UNIT sizeof(size_t) + +#define KB (1 << 10) +#define MB (1 << 20) +#define GB (1U << 30) + +#define MAXD_LOG 16 +#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) +#define STEPSIZE sizeof(size_t) + +#define ML_BITS 4 +#define ML_MASK ((1U << ML_BITS) - 1) +#define RUN_BITS (8 - ML_BITS) +#define RUN_MASK ((1U << RUN_BITS) - 1) + +/*-************************************ + * Reading and writing into memory + **************************************/ +static FORCE_INLINE U16 LZ4_read16(const void *ptr) +{ + return get_unaligned((const U16 *)ptr); +} + +static FORCE_INLINE U32 LZ4_read32(const void *ptr) +{ + return get_unaligned((const U32 *)ptr); +} + +static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr) +{ + return get_unaligned((const size_t *)ptr); +} + +static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value) +{ + put_unaligned(value, (U16 *)memPtr); +} + +static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value) +{ + put_unaligned(value, (U32 *)memPtr); +} + +static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr) +{ + return get_unaligned_le16(memPtr); +} + +static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value) +{ + return put_unaligned_le16(value, memPtr); +} + +/* + * LZ4 relies on memcpy with a constant size being inlined. In freestanding + * environments, the compiler can't assume the implementation of memcpy() is + * standard compliant, so apply its specialized memcpy() inlining logic. When + * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy() + * as-if it were standard compliant, so it can inline it in freestanding + * environments. This is needed when decompressing the Linux Kernel, for example.
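+ * LZ4_memmove() is mapped to __builtin_memmove() for the same reason; the + * decompressor uses it on the literal-copy path that must tolerate + * overlapping source and destination during in-place decompression.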
+ */ +#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size) +#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size) + +static FORCE_INLINE void LZ4_copy8(void *dst, const void *src) +{ +#if LZ4_ARCH64 + U64 a = get_unaligned((const U64 *)src); + + put_unaligned(a, (U64 *)dst); +#else + U32 a = get_unaligned((const U32 *)src); + U32 b = get_unaligned((const U32 *)src + 1); + + put_unaligned(a, (U32 *)dst); + put_unaligned(b, (U32 *)dst + 1); +#endif +} + +/* + * customized variant of memcpy, + * which can overwrite up to 7 bytes beyond dstEnd + */ +static FORCE_INLINE void LZ4_wildCopy(void *dstPtr, + const void *srcPtr, void *dstEnd) +{ + BYTE *d = (BYTE *)dstPtr; + const BYTE *s = (const BYTE *)srcPtr; + BYTE *const e = (BYTE *)dstEnd; + + do { + LZ4_copy8(d, s); + d += 8; + s += 8; + } while (d < e); +} + +static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val) +{ +#if LZ4_LITTLE_ENDIAN + return __ffs(val) >> 3; +#else + return (BITS_PER_LONG - 1 - __fls(val)) >> 3; +#endif +} + +static FORCE_INLINE unsigned int LZ4_count( + const BYTE *pIn, + const BYTE *pMatch, + const BYTE *pInLimit) +{ + const BYTE *const pStart = pIn; + + while (likely(pIn < pInLimit - (STEPSIZE - 1))) { + size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + + if (!diff) { + pIn += STEPSIZE; + pMatch += STEPSIZE; + continue; + } + + pIn += LZ4_NbCommonBytes(diff); + + return (unsigned int)(pIn - pStart); + } + +#if LZ4_ARCH64 + if ((pIn < (pInLimit - 3)) + && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { + pIn += 4; + pMatch += 4; + } +#endif + + if ((pIn < (pInLimit - 1)) + && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { + pIn += 2; + pMatch += 2; + } + + if ((pIn < pInLimit) && (*pMatch == *pIn)) + pIn++; + + return (unsigned int)(pIn - pStart); +} + +typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive; +typedef enum { byPtr, byU32, byU16 } tableType_t; + +typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + +typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; +typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; + +#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c)) + +#endif diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c new file mode 100644 index 000000000..e7ac8694b --- /dev/null +++ b/lib/lz4/lz4hc_compress.c @@ -0,0 +1,768 @@ +/* + * LZ4 HC - High Compression Mode of LZ4 + * Copyright (C) 2011-2015, Yann Collet. + * + * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 + * + * Changed for kernel usage by: + * Sven Schmidt <4sschmid@informatik.uni-hamburg.de> + */ + +/*-************************************ + * Dependencies + **************************************/ +#include <linux/lz4.h> +#include "lz4defs.h" +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> /* memset */ + +/* ************************************* + * Local Constants and types + ***************************************/ + +#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH) + +#define HASH_FUNCTION(i) (((i) * 2654435761U) \ + >> ((MINMATCH*8) - LZ4HC_HASH_LOG)) +#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */ + +static U32 LZ4HC_hashPtr(const void *ptr) +{ + return HASH_FUNCTION(LZ4_read32(ptr)); +} + +/************************************** + * HC Compression + **************************************/ +static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start) +{ + memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable)); + memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); + hc4->nextToUpdate = 64 * KB; + hc4->base = start - 64 * KB; + hc4->end = start; + hc4->dictBase = start - 64 * KB; + hc4->dictLimit = 64 * KB; + hc4->lowLimit = 64 * KB; +} + +/* Update chains up to ip (excluded) */ +static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4, + const BYTE *ip) +{ + U16 * const chainTable = hc4->chainTable; + U32 * const hashTable = hc4->hashTable; + const BYTE * const base = hc4->base; + U32 const target = (U32)(ip - base); + U32 idx = hc4->nextToUpdate; + + while (idx < target) { + U32 const h = LZ4HC_hashPtr(base + idx); + size_t delta = idx - hashTable[h]; + + if (delta > MAX_DISTANCE) + delta = MAX_DISTANCE; + + DELTANEXTU16(idx) = (U16)delta; + + hashTable[h] = idx; + idx++; + } + + hc4->nextToUpdate = target; +} + +static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch( + LZ4HC_CCtx_internal *hc4, /* Index table will be updated */ + const BYTE *ip, + const BYTE * const iLimit, + const BYTE **matchpos, + const int maxNbAttempts) +{ + U16 * const chainTable = hc4->chainTable; + U32 * const HashTable = hc4->hashTable; + const BYTE * const base = hc4->base; + const BYTE * const dictBase = hc4->dictBase; + const U32 dictLimit = hc4->dictLimit; + const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base)) + ?
hc4->lowLimit + : (U32)(ip - base) - (64 * KB - 1); + U32 matchIndex; + int nbAttempts = maxNbAttempts; + size_t ml = 0; + + /* HC4 match finder */ + LZ4HC_Insert(hc4, ip); + matchIndex = HashTable[LZ4HC_hashPtr(ip)]; + + while ((matchIndex >= lowLimit) + && (nbAttempts)) { + nbAttempts--; + if (matchIndex >= dictLimit) { + const BYTE * const match = base + matchIndex; + + if (*(match + ml) == *(ip + ml) + && (LZ4_read32(match) == LZ4_read32(ip))) { + size_t const mlt = LZ4_count(ip + MINMATCH, + match + MINMATCH, iLimit) + MINMATCH; + + if (mlt > ml) { + ml = mlt; + *matchpos = match; + } + } + } else { + const BYTE * const match = dictBase + matchIndex; + + if (LZ4_read32(match) == LZ4_read32(ip)) { + size_t mlt; + const BYTE *vLimit = ip + + (dictLimit - matchIndex); + + if (vLimit > iLimit) + vLimit = iLimit; + mlt = LZ4_count(ip + MINMATCH, + match + MINMATCH, vLimit) + MINMATCH; + if ((ip + mlt == vLimit) + && (vLimit < iLimit)) + mlt += LZ4_count(ip + mlt, + base + dictLimit, + iLimit); + if (mlt > ml) { + /* virtual matchpos */ + ml = mlt; + *matchpos = base + matchIndex; + } + } + } + matchIndex -= DELTANEXTU16(matchIndex); + } + + return (int)ml; +} + +static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch( + LZ4HC_CCtx_internal *hc4, + const BYTE * const ip, + const BYTE * const iLowLimit, + const BYTE * const iHighLimit, + int longest, + const BYTE **matchpos, + const BYTE **startpos, + const int maxNbAttempts) +{ + U16 * const chainTable = hc4->chainTable; + U32 * const HashTable = hc4->hashTable; + const BYTE * const base = hc4->base; + const U32 dictLimit = hc4->dictLimit; + const BYTE * const lowPrefixPtr = base + dictLimit; + const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base)) + ? hc4->lowLimit + : (U32)(ip - base) - (64 * KB - 1); + const BYTE * const dictBase = hc4->dictBase; + U32 matchIndex; + int nbAttempts = maxNbAttempts; + int delta = (int)(ip - iLowLimit); + + /* First Match */ + LZ4HC_Insert(hc4, ip); + matchIndex = HashTable[LZ4HC_hashPtr(ip)]; + + while ((matchIndex >= lowLimit) + && (nbAttempts)) { + nbAttempts--; + if (matchIndex >= dictLimit) { + const BYTE *matchPtr = base + matchIndex; + + if (*(iLowLimit + longest) + == *(matchPtr - delta + longest)) { + if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { + int mlt = MINMATCH + LZ4_count( + ip + MINMATCH, + matchPtr + MINMATCH, + iHighLimit); + int back = 0; + + while ((ip + back > iLowLimit) + && (matchPtr + back > lowPrefixPtr) + && (ip[back - 1] == matchPtr[back - 1])) + back--; + + mlt -= back; + + if (mlt > longest) { + longest = (int)mlt; + *matchpos = matchPtr + back; + *startpos = ip + back; + } + } + } + } else { + const BYTE * const matchPtr = dictBase + matchIndex; + + if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { + size_t mlt; + int back = 0; + const BYTE *vLimit = ip + (dictLimit - matchIndex); + + if (vLimit > iHighLimit) + vLimit = iHighLimit; + + mlt = LZ4_count(ip + MINMATCH, + matchPtr + MINMATCH, vLimit) + MINMATCH; + + if ((ip + mlt == vLimit) && (vLimit < iHighLimit)) + mlt += LZ4_count(ip + mlt, base + dictLimit, + iHighLimit); + while ((ip + back > iLowLimit) + && (matchIndex + back > lowLimit) + && (ip[back - 1] == matchPtr[back - 1])) + back--; + + mlt -= back; + + if ((int)mlt > longest) { + longest = (int)mlt; + *matchpos = base + matchIndex + back; + *startpos = ip + back; + } + } + } + + matchIndex -= DELTANEXTU16(matchIndex); + } + + return longest; +} + +static FORCE_INLINE int LZ4HC_encodeSequence( + const BYTE **ip, + BYTE **op, + const BYTE **anchor, + int 
matchLength, + const BYTE * const match, + limitedOutput_directive limitedOutputBuffer, + BYTE *oend) +{ + int length; + BYTE *token; + + /* Encode Literal length */ + length = (int)(*ip - *anchor); + token = (*op)++; + + if ((limitedOutputBuffer) + && ((*op + (length>>8) + + length + (2 + 1 + LASTLITERALS)) > oend)) { + /* Check output limit */ + return 1; + } + if (length >= (int)RUN_MASK) { + int len; + + *token = (RUN_MASK< 254 ; len -= 255) + *(*op)++ = 255; + *(*op)++ = (BYTE)len; + } else + *token = (BYTE)(length<>8) + + (1 + LASTLITERALS) > oend)) { + /* Check output limit */ + return 1; + } + + if (length >= (int)ML_MASK) { + *token += ML_MASK; + length -= ML_MASK; + + for (; length > 509 ; length -= 510) { + *(*op)++ = 255; + *(*op)++ = 255; + } + + if (length > 254) { + length -= 255; + *(*op)++ = 255; + } + + *(*op)++ = (BYTE)length; + } else + *token += (BYTE)(length); + + /* Prepare next loop */ + *ip += matchLength; + *anchor = *ip; + + return 0; +} + +static int LZ4HC_compress_generic( + LZ4HC_CCtx_internal *const ctx, + const char * const source, + char * const dest, + int const inputSize, + int const maxOutputSize, + int compressionLevel, + limitedOutput_directive limit + ) +{ + const BYTE *ip = (const BYTE *) source; + const BYTE *anchor = ip; + const BYTE * const iend = ip + inputSize; + const BYTE * const mflimit = iend - MFLIMIT; + const BYTE * const matchlimit = (iend - LASTLITERALS); + + BYTE *op = (BYTE *) dest; + BYTE * const oend = op + maxOutputSize; + + unsigned int maxNbAttempts; + int ml, ml2, ml3, ml0; + const BYTE *ref = NULL; + const BYTE *start2 = NULL; + const BYTE *ref2 = NULL; + const BYTE *start3 = NULL; + const BYTE *ref3 = NULL; + const BYTE *start0; + const BYTE *ref0; + + /* init */ + if (compressionLevel > LZ4HC_MAX_CLEVEL) + compressionLevel = LZ4HC_MAX_CLEVEL; + if (compressionLevel < 1) + compressionLevel = LZ4HC_DEFAULT_CLEVEL; + maxNbAttempts = 1 << (compressionLevel - 1); + ctx->end += inputSize; + + ip++; + + /* Main Loop */ + while (ip < mflimit) { + ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, + matchlimit, (&ref), maxNbAttempts); + if (!ml) { + ip++; + continue; + } + + /* saved, in case we would skip too much */ + start0 = ip; + ref0 = ref; + ml0 = ml; + +_Search2: + if (ip + ml < mflimit) + ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, + ip + ml - 2, ip + 0, + matchlimit, ml, &ref2, + &start2, maxNbAttempts); + else + ml2 = ml; + + if (ml2 == ml) { + /* No better match */ + if (LZ4HC_encodeSequence(&ip, &op, + &anchor, ml, ref, limit, oend)) + return 0; + continue; + } + + if (start0 < ip) { + if (start2 < ip + ml0) { + /* empirical */ + ip = start0; + ref = ref0; + ml = ml0; + } + } + + /* Here, start0 == ip */ + if ((start2 - ip) < 3) { + /* First Match too small : removed */ + ml = ml2; + ip = start2; + ref = ref2; + goto _Search2; + } + +_Search3: + /* + * Currently we have : + * ml2 > ml1, and + * ip1 + 3 <= ip2 (usually < ip1 + ml1) + */ + if ((start2 - ip) < OPTIMAL_ML) { + int correction; + int new_ml = ml; + + if (new_ml > OPTIMAL_ML) + new_ml = OPTIMAL_ML; + if (ip + new_ml > start2 + ml2 - MINMATCH) + new_ml = (int)(start2 - ip) + ml2 - MINMATCH; + + correction = new_ml - (int)(start2 - ip); + + if (correction > 0) { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } + /* + * Now, we have start2 = ip + new_ml, + * with new_ml = min(ml, OPTIMAL_ML = 18) + */ + + if (start2 + ml2 < mflimit) + ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, + start2 + ml2 - 3, start2, + matchlimit, ml2, &ref3, &start3, + 
maxNbAttempts); + else + ml3 = ml2; + + if (ml3 == ml2) { + /* No better match : 2 sequences to encode */ + /* ip & ref are known; Now for ml */ + if (start2 < ip + ml) + ml = (int)(start2 - ip); + /* Now, encode 2 sequences */ + if (LZ4HC_encodeSequence(&ip, &op, &anchor, + ml, ref, limit, oend)) + return 0; + ip = start2; + if (LZ4HC_encodeSequence(&ip, &op, &anchor, + ml2, ref2, limit, oend)) + return 0; + continue; + } + + if (start3 < ip + ml + 3) { + /* Not enough space for match 2 : remove it */ + if (start3 >= (ip + ml)) { + /* can write Seq1 immediately + * ==> Seq2 is removed, + * so Seq3 becomes Seq1 + */ + if (start2 < ip + ml) { + int correction = (int)(ip + ml - start2); + + start2 += correction; + ref2 += correction; + ml2 -= correction; + if (ml2 < MINMATCH) { + start2 = start3; + ref2 = ref3; + ml2 = ml3; + } + } + + if (LZ4HC_encodeSequence(&ip, &op, &anchor, + ml, ref, limit, oend)) + return 0; + ip = start3; + ref = ref3; + ml = ml3; + + start0 = start2; + ref0 = ref2; + ml0 = ml2; + goto _Search2; + } + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + goto _Search3; + } + + /* + * OK, now we have 3 ascending matches; + * let's write at least the first one + * ip & ref are known; Now for ml + */ + if (start2 < ip + ml) { + if ((start2 - ip) < (int)ML_MASK) { + int correction; + + if (ml > OPTIMAL_ML) + ml = OPTIMAL_ML; + if (ip + ml > start2 + ml2 - MINMATCH) + ml = (int)(start2 - ip) + ml2 - MINMATCH; + correction = ml - (int)(start2 - ip); + if (correction > 0) { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } else + ml = (int)(start2 - ip); + } + if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, + ref, limit, oend)) + return 0; + + ip = start2; + ref = ref2; + ml = ml2; + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + + goto _Search3; + } + + /* Encode Last Literals */ + { + int lastRun = (int)(iend - anchor); + + if ((limit) + && (((char *)op - dest) + lastRun + 1 + + ((lastRun + 255 - RUN_MASK)/255) + > (U32)maxOutputSize)) { + /* Check output limit */ + return 0; + } + if (lastRun >= (int)RUN_MASK) { + *op++ = (RUN_MASK< 254 ; lastRun -= 255) + *op++ = 255; + *op++ = (BYTE) lastRun; + } else + *op++ = (BYTE)(lastRun<internal_donotuse; + + if (((size_t)(state)&(sizeof(void *) - 1)) != 0) { + /* Error : state is not aligned + * for pointers (32 or 64 bits) + */ + return 0; + } + + LZ4HC_init(ctx, (const BYTE *)src); + + if (maxDstSize < LZ4_compressBound(srcSize)) + return LZ4HC_compress_generic(ctx, src, dst, + srcSize, maxDstSize, compressionLevel, limitedOutput); + else + return LZ4HC_compress_generic(ctx, src, dst, + srcSize, maxDstSize, compressionLevel, noLimit); +} + +int LZ4_compress_HC(const char *src, char *dst, int srcSize, + int maxDstSize, int compressionLevel, void *wrkmem) +{ + return LZ4_compress_HC_extStateHC(wrkmem, src, dst, + srcSize, maxDstSize, compressionLevel); +} +EXPORT_SYMBOL(LZ4_compress_HC); + +/************************************** + * Streaming Functions + **************************************/ +void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel) +{ + LZ4_streamHCPtr->internal_donotuse.base = NULL; + LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel; +} + +int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr, + const char *dictionary, + int dictSize) +{ + LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + + if (dictSize > 64 * KB) { + dictionary += dictSize - 64 * KB; + dictSize = 64 * KB; + } + LZ4HC_init(ctxPtr, (const BYTE 
*)dictionary); + if (dictSize >= 4) + LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3)); + ctxPtr->end = (const BYTE *)dictionary + dictSize; + return dictSize; +} +EXPORT_SYMBOL(LZ4_loadDictHC); + +/* compression */ + +static void LZ4HC_setExternalDict( + LZ4HC_CCtx_internal *ctxPtr, + const BYTE *newBlock) +{ + if (ctxPtr->end >= ctxPtr->base + 4) { + /* Referencing remaining dictionary content */ + LZ4HC_Insert(ctxPtr, ctxPtr->end - 3); + } + + /* + * Only one memory segment for extDict, + * so any previous extDict is lost at this stage + */ + ctxPtr->lowLimit = ctxPtr->dictLimit; + ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base); + ctxPtr->dictBase = ctxPtr->base; + ctxPtr->base = newBlock - ctxPtr->dictLimit; + ctxPtr->end = newBlock; + /* match referencing will resume from there */ + ctxPtr->nextToUpdate = ctxPtr->dictLimit; +} + +static int LZ4_compressHC_continue_generic( + LZ4_streamHC_t *LZ4_streamHCPtr, + const char *source, + char *dest, + int inputSize, + int maxOutputSize, + limitedOutput_directive limit) +{ + LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + + /* auto - init if forgotten */ + if (ctxPtr->base == NULL) + LZ4HC_init(ctxPtr, (const BYTE *) source); + + /* Check overflow */ + if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) { + size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) + - ctxPtr->dictLimit; + if (dictSize > 64 * KB) + dictSize = 64 * KB; + LZ4_loadDictHC(LZ4_streamHCPtr, + (const char *)(ctxPtr->end) - dictSize, (int)dictSize); + } + + /* Check if blocks follow each other */ + if ((const BYTE *)source != ctxPtr->end) + LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source); + + /* Check overlapping input/dictionary space */ + { + const BYTE *sourceEnd = (const BYTE *) source + inputSize; + const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit; + const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit; + + if ((sourceEnd > dictBegin) + && ((const BYTE *)source < dictEnd)) { + if (sourceEnd > dictEnd) + sourceEnd = dictEnd; + ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase); + + if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) + ctxPtr->lowLimit = ctxPtr->dictLimit; + } + } + + return LZ4HC_compress_generic(ctxPtr, source, dest, + inputSize, maxOutputSize, ctxPtr->compressionLevel, limit); +} + +int LZ4_compress_HC_continue( + LZ4_streamHC_t *LZ4_streamHCPtr, + const char *source, + char *dest, + int inputSize, + int maxOutputSize) +{ + if (maxOutputSize < LZ4_compressBound(inputSize)) + return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, + source, dest, inputSize, maxOutputSize, limitedOutput); + else + return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, + source, dest, inputSize, maxOutputSize, noLimit); +} +EXPORT_SYMBOL(LZ4_compress_HC_continue); + +/* dictionary saving */ + +int LZ4_saveDictHC( + LZ4_streamHC_t *LZ4_streamHCPtr, + char *safeBuffer, + int dictSize) +{ + LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse; + int const prefixSize = (int)(streamPtr->end + - (streamPtr->base + streamPtr->dictLimit)); + + if (dictSize > 64 * KB) + dictSize = 64 * KB; + if (dictSize < 4) + dictSize = 0; + if (dictSize > prefixSize) + dictSize = prefixSize; + + memmove(safeBuffer, streamPtr->end - dictSize, dictSize); + + { + U32 const endIndex = (U32)(streamPtr->end - streamPtr->base); + + streamPtr->end = (const BYTE *)safeBuffer + dictSize; + streamPtr->base = streamPtr->end - endIndex; + streamPtr->dictLimit = endIndex - dictSize; + streamPtr->lowLimit = 
endIndex - dictSize;
+
+		if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+			streamPtr->nextToUpdate = streamPtr->dictLimit;
+	}
+	return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDictHC);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("LZ4 HC compressor");
-- 
cgit v1.2.3
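
Editor's usage sketch (not part of the patch): the one-shot entry point LZ4_compress_HC() added above expects the caller to supply its own scratch state. The sketch below assumes the usual kernel header <linux/lz4.h>, which provides LZ4HC_MEM_COMPRESS, LZ4HC_DEFAULT_CLEVEL and LZ4_compressBound(); the helper name example_hc_compress() and its error handling are illustrative only.

#include <linux/lz4.h>
#include <linux/vmalloc.h>

/*
 * Illustrative helper (not part of the patch): compress one buffer with
 * the HC path.  LZ4_compress_HC() returns the number of bytes written to
 * dst, or 0 when the output buffer was too small.
 */
static int example_hc_compress(const char *src, int src_len,
			       char *dst, int dst_cap)
{
	void *wrkmem;
	int out_len;

	/* The one-shot entry point needs LZ4HC_MEM_COMPRESS bytes of state. */
	wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);
	if (!wrkmem)
		return 0;

	out_len = LZ4_compress_HC(src, dst, src_len, dst_cap,
				  LZ4HC_DEFAULT_CLEVEL, wrkmem);

	vfree(wrkmem);
	return out_len;
}

Passing a dst_cap of at least LZ4_compressBound(src_len) selects the unchecked noLimit path in LZ4HC_compress_generic(); any smaller capacity takes the bounds-checked limitedOutput path and may legitimately return 0.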
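
A second hedged sketch for the streaming interface defined above (LZ4_resetStreamHC(), LZ4_loadDictHC(), LZ4_compress_HC_continue()). Block layout, buffer sizes and the helper name are assumptions for illustration; only the LZ4_* calls and types come from this file and <linux/lz4.h>.

#include <linux/lz4.h>
#include <linux/vmalloc.h>

/*
 * Illustrative helper (not part of the patch): compress two consecutive
 * blocks so that the second may reference matches in the first and in a
 * preloaded dictionary.  Returns total compressed bytes, or 0 on failure.
 */
static int example_hc_stream(const char *dict, int dict_len,
			     const char *blk1, int len1,
			     const char *blk2, int len2,
			     char *out, int out_cap)
{
	LZ4_streamHC_t *stream;
	int n1, n2 = 0;

	stream = vmalloc(sizeof(*stream));
	if (!stream)
		return 0;

	LZ4_resetStreamHC(stream, LZ4HC_DEFAULT_CLEVEL);
	/* Only the last 64 KB of the dictionary are retained. */
	LZ4_loadDictHC(stream, dict, dict_len);

	n1 = LZ4_compress_HC_continue(stream, blk1, out, len1, out_cap);
	if (n1)
		n2 = LZ4_compress_HC_continue(stream, blk2, out + n1,
					      len2, out_cap - n1);

	vfree(stream);
	return (n1 && n2) ? n1 + n2 : 0;
}

The decompressing side has to be primed with the same dictionary and see the blocks in the same order, otherwise the back-references emitted here cannot be resolved.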