path: root/other-licenses/snappy
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:35:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:35:49 +0000
commit     d8bbc7858622b6d9c278469aab701ca0b609cddf (patch)
tree       eff41dc61d9f714852212739e6b3738b82a2af87 /other-licenses/snappy
parent     Releasing progress-linux version 125.0.3-1~progress7.99u1. (diff)
download   firefox-d8bbc7858622b6d9c278469aab701ca0b609cddf.tar.xz
           firefox-d8bbc7858622b6d9c278469aab701ca0b609cddf.zip
Merging upstream version 126.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'other-licenses/snappy')
-rw-r--r--  other-licenses/snappy/01-explicit.patch             |  22
-rw-r--r--  other-licenses/snappy/README                        |  12
-rw-r--r--  other-licenses/snappy/snappy-stubs-public.h         |   5
-rw-r--r--  other-licenses/snappy/src/CONTRIBUTING.md           |  33
-rw-r--r--  other-licenses/snappy/src/NEWS                      |   6
-rw-r--r--  other-licenses/snappy/src/README.md                 |  33
-rw-r--r--  other-licenses/snappy/src/snappy-internal.h         | 127
-rw-r--r--  other-licenses/snappy/src/snappy-stubs-internal.h   |  85
-rw-r--r--  other-licenses/snappy/src/snappy-test.cc            |   2
-rw-r--r--  other-licenses/snappy/src/snappy-test.h             |  18
-rw-r--r--  other-licenses/snappy/src/snappy.cc                 | 730
-rw-r--r--  other-licenses/snappy/src/snappy.h                  |  56
-rw-r--r--  other-licenses/snappy/src/snappy_compress_fuzzer.cc |  32
-rw-r--r--  other-licenses/snappy/src/snappy_unittest.cc        |  86
14 files changed, 972 insertions, 275 deletions
diff --git a/other-licenses/snappy/01-explicit.patch b/other-licenses/snappy/01-explicit.patch
new file mode 100644
index 0000000000..7aeb130014
--- /dev/null
+++ b/other-licenses/snappy/01-explicit.patch
@@ -0,0 +1,22 @@
+diff --git a/other-licenses/snappy/src/snappy.h b/other-licenses/snappy/src/snappy.h
+--- a/other-licenses/snappy/src/snappy.h
++++ b/other-licenses/snappy/src/snappy.h
+@@ -60,17 +60,17 @@ namespace snappy {
+ // 9 in the future.
+ // If you played with other compression algorithms, level 1 is equivalent to
+ // fast mode (level 1) of LZ4, level 2 is equivalent to LZ4's level 2 mode
+ // and compresses somewhere around zstd:-3 and zstd:-2 but generally with
+ // faster decompression speeds than snappy:1 and zstd:-3.
+ int level = DefaultCompressionLevel();
+
+ constexpr CompressionOptions() = default;
+- constexpr CompressionOptions(int compression_level)
++ constexpr explicit CompressionOptions(int compression_level)
+ : level(compression_level) {}
+ static constexpr int MinCompressionLevel() { return 1; }
+ static constexpr int MaxCompressionLevel() { return 2; }
+ static constexpr int DefaultCompressionLevel() { return 1; }
+ };
+
+ // ------------------------------------------------------------------------
+ // Generic compression/decompression routines.
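For reference, a minimal sketch (not part of the patch) of what the added `explicit` buys: an int no longer converts silently to snappy::CompressionOptions, so callers have to spell the construction out. The Configure() consumer below is hypothetical and exists only to show the conversion behavior.

```c++
#include <snappy.h>

// Hypothetical consumer, used only to illustrate the conversion rules.
void Configure(const snappy::CompressionOptions& opts) { (void)opts; }

void Example() {
  // Configure(2);                            // no longer compiles: implicit int conversion
  Configure(snappy::CompressionOptions(2));   // OK: explicit construction, level 2 (max)
  Configure(snappy::CompressionOptions{});    // OK: default level 1
}
```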
diff --git a/other-licenses/snappy/README b/other-licenses/snappy/README
index 2538acab69..194f1cc835 100644
--- a/other-licenses/snappy/README
+++ b/other-licenses/snappy/README
@@ -1,17 +1,20 @@
See src/README for the README that ships with snappy.
Mozilla does not modify the actual snappy source with the exception of the
-'snappy-stubs-public.h' header. We have replaced its build system with our own.
+'snappy-stubs-public.h' header and one small patch to resolve implicit
+constructor warnings. We have replaced its build system with our own.
Snappy comes from:
https://github.com/google/snappy
-We are currently using revision: 1.1.9
+We are currently using revision: 1.2.0
To upgrade to a newer version:
- 1. Check out the new code using subversion.
+ 1. Check out the new code using git.
2. Update 'snappy-stubs-public.h' in this directory with any changes that were
- made to 'snappy-stubs-public.h.in' in the new source.
+ made to 'snappy-stubs-public.h.in' in the new source. Note that we don't
+ bother trying to detect the availability of sys/uio.h and unconditionally
+ define the iovec type instead for all platforms.
3. Copy the major/minor/patch versions from 'CMakeLists.txt' into
'snappy-stubs-public.h'.
4. Copy all source files from the new version into the src subdirectory. The
@@ -23,4 +26,5 @@ To upgrade to a newer version:
- 'testdata' subdirectory
- 'third_party' subdirectory
5. Update the revision stamp in this file.
+ 6. Apply 01-explicit.patch.
diff --git a/other-licenses/snappy/snappy-stubs-public.h b/other-licenses/snappy/snappy-stubs-public.h
index 42b690094c..4c9030bbe4 100644
--- a/other-licenses/snappy/snappy-stubs-public.h
+++ b/other-licenses/snappy/snappy-stubs-public.h
@@ -1,5 +1,4 @@
// Copyright 2011 Google Inc. All Rights Reserved.
-// Author: sesse@google.com (Steinar H. Gunderson)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -39,8 +38,8 @@
#include <cstddef>
#define SNAPPY_MAJOR 1
-#define SNAPPY_MINOR 1
-#define SNAPPY_PATCHLEVEL 9
+#define SNAPPY_MINOR 2
+#define SNAPPY_PATCHLEVEL 0
#define SNAPPY_VERSION \
((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
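A quick sanity check of the bumped macros (worked arithmetic, not part of the vendored header):

```c++
// SNAPPY_VERSION for 1.2.0: (1 << 16) | (2 << 8) | 0 == 0x10200 == 66048.
static_assert(((1 << 16) | (2 << 8) | 0) == 0x10200, "version packing for 1.2.0");
```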
diff --git a/other-licenses/snappy/src/CONTRIBUTING.md b/other-licenses/snappy/src/CONTRIBUTING.md
index d0ce551527..66a60d5c86 100644
--- a/other-licenses/snappy/src/CONTRIBUTING.md
+++ b/other-licenses/snappy/src/CONTRIBUTING.md
@@ -3,30 +3,10 @@
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
-## Project Goals
-
-In addition to the aims listed at the top of the [README](README.md) Snappy
-explicitly supports the following:
-
-1. C++11
-2. Clang (gcc and MSVC are best-effort).
-3. Low level optimizations (e.g. assembly or equivalent intrinsics) for:
- 1. [x86](https://en.wikipedia.org/wiki/X86)
- 2. [x86-64](https://en.wikipedia.org/wiki/X86-64)
- 3. ARMv7 (32-bit)
- 4. ARMv8 (AArch64)
-4. Supports only the Snappy compression scheme as described in
- [format_description.txt](format_description.txt).
-5. CMake for building
-
-Changes adding features or dependencies outside of the core area of focus listed
-above might not be accepted. If in doubt post a message to the
-[Snappy discussion mailing list](https://groups.google.com/g/snappy-compression).
-
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
+Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
@@ -35,12 +15,17 @@ You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
-## Code reviews
+## Code Reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
-Please make sure that all the automated checks (CLA, AppVeyor, Travis) pass for
-your pull requests. Pull requests whose checks fail may be ignored.
+See [the README](README.md#contributing-to-the-snappy-project) for areas
+where we are likely to accept external contributions.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community
+Guidelines](https://opensource.google/conduct/).
diff --git a/other-licenses/snappy/src/NEWS b/other-licenses/snappy/src/NEWS
index 931a5e13fd..792a578001 100644
--- a/other-licenses/snappy/src/NEWS
+++ b/other-licenses/snappy/src/NEWS
@@ -1,3 +1,9 @@
+Snappy v1.1.10, Mar 8th 2023:
+
+ * Performance improvements
+
+ * Compilation fixes for various environments
+
Snappy v1.1.9, May 4th 2021:
* Performance improvements.
diff --git a/other-licenses/snappy/src/README.md b/other-licenses/snappy/src/README.md
index 8fd7dc09ed..398be7d58a 100644
--- a/other-licenses/snappy/src/README.md
+++ b/other-licenses/snappy/src/README.md
@@ -1,7 +1,6 @@
Snappy, a fast compressor/decompressor.
-[![Build Status](https://travis-ci.org/google/snappy.svg?branch=master)](https://travis-ci.org/google/snappy)
-[![Build status](https://ci.appveyor.com/api/projects/status/t9nubcqkwo8rw8yn/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
+[![Build Status](https://github.com/google/snappy/actions/workflows/build.yml/badge.svg)](https://github.com/google/snappy/actions/workflows/build.yml)
Introduction
============
@@ -90,13 +89,13 @@ your calling file, and link against the compiled library.
There are many ways to call Snappy, but the simplest possible is
-```cpp
+```c++
snappy::Compress(input.data(), input.size(), &output);
```
and similarly
-```cpp
+```c++
snappy::Uncompress(input.data(), input.size(), &output);
```
@@ -132,6 +131,32 @@ should provide a reasonably balanced starting point for benchmarking. (Note that
baddata[1-3].snappy are not intended as benchmarks; they are used to verify
correctness in the presence of corrupted data in the unit test.)
+Contributing to the Snappy Project
+==================================
+
+In addition to the aims listed at the top of the [README](README.md) Snappy
+explicitly supports the following:
+
+1. C++11
+2. Clang (gcc and MSVC are best-effort).
+3. Low level optimizations (e.g. assembly or equivalent intrinsics) for:
+ 1. [x86](https://en.wikipedia.org/wiki/X86)
+ 2. [x86-64](https://en.wikipedia.org/wiki/X86-64)
+ 3. ARMv7 (32-bit)
+ 4. ARMv8 (AArch64)
+4. Supports only the Snappy compression scheme as described in
+ [format_description.txt](format_description.txt).
+5. CMake for building
+
+Changes adding features or dependencies outside of the core area of focus listed
+above might not be accepted. If in doubt post a message to the
+[Snappy discussion mailing list](https://groups.google.com/g/snappy-compression).
+
+We are unlikely to accept contributions to the build configuration files, such
+as `CMakeLists.txt`. We are focused on maintaining a build configuration that
+allows us to test that the project works in a few supported configurations
+inside Google. We are not currently interested in supporting other requirements,
+such as different operating systems, compilers, or build systems.
Contact
=======
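The README snippets above show the simplest calls; a self-contained round trip with the long-standing std::string overloads looks roughly like this (a sketch, not taken from the tree):

```c++
#include <cassert>
#include <string>
#include <snappy.h>

int main() {
  const std::string input = "hello hello hello hello hello";
  std::string compressed, restored;
  snappy::Compress(input.data(), input.size(), &compressed);
  const bool ok =
      snappy::Uncompress(compressed.data(), compressed.size(), &restored);
  assert(restored == input);
  return (ok && restored == input) ? 0 : 1;
}
```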
diff --git a/other-licenses/snappy/src/snappy-internal.h b/other-licenses/snappy/src/snappy-internal.h
index 720ccd8282..ae78247dbb 100644
--- a/other-licenses/snappy/src/snappy-internal.h
+++ b/other-licenses/snappy/src/snappy-internal.h
@@ -31,11 +31,88 @@
#ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
#define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+#include <utility>
+
#include "snappy-stubs-internal.h"
+#if SNAPPY_HAVE_SSSE3
+// Please do not replace with <x86intrin.h> or with headers that assume more
+// advanced SSE versions without checking with all the OWNERS.
+#include <emmintrin.h>
+#include <tmmintrin.h>
+#endif
+
+#if SNAPPY_HAVE_NEON
+#include <arm_neon.h>
+#endif
+
+#if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
+#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
+#else
+#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
+#endif
+
namespace snappy {
namespace internal {
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+#if SNAPPY_HAVE_SSSE3
+using V128 = __m128i;
+#elif SNAPPY_HAVE_NEON
+using V128 = uint8x16_t;
+#endif
+
+// Load 128 bits of integer data. `src` must be 16-byte aligned.
+inline V128 V128_Load(const V128* src);
+
+// Load 128 bits of integer data. `src` does not need to be aligned.
+inline V128 V128_LoadU(const V128* src);
+
+// Store 128 bits of integer data. `dst` does not need to be aligned.
+inline void V128_StoreU(V128* dst, V128 val);
+
+// Shuffle packed 8-bit integers using a shuffle mask.
+// Each packed integer in the shuffle mask must be in [0,16).
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
+
+// Constructs V128 with 16 chars |c|.
+inline V128 V128_DupChar(char c);
+
+#if SNAPPY_HAVE_SSSE3
+inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
+
+inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
+
+inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
+
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+ return _mm_shuffle_epi8(input, shuffle_mask);
+}
+
+inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
+
+#elif SNAPPY_HAVE_NEON
+inline V128 V128_Load(const V128* src) {
+ return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+}
+
+inline V128 V128_LoadU(const V128* src) {
+ return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+}
+
+inline void V128_StoreU(V128* dst, V128 val) {
+ vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
+}
+
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+ assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
+ return vqtbl1q_u8(input, shuffle_mask);
+}
+
+inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
+#endif
+#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
// Working memory performs a single allocation to hold all scratch space
// required for compression.
class WorkingMemory {
@@ -95,8 +172,9 @@ char* CompressFragment(const char* input,
// loading from s2 + n.
//
// Separate implementation for 64-bit, little-endian cpus.
-#if !defined(SNAPPY_IS_BIG_ENDIAN) && \
- (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || defined(ARCH_ARM))
+#if !SNAPPY_IS_BIG_ENDIAN && \
+ (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
+ defined(ARCH_ARM))
static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
const char* s2,
const char* s2_limit,
@@ -154,8 +232,9 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
uint64_t xorval = a1 ^ a2;
int shift = Bits::FindLSBSetNonZero64(xorval);
size_t matched_bytes = shift >> 3;
+ uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
#ifndef __x86_64__
- *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+ a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
#else
// Ideally this would just be
//
@@ -166,19 +245,21 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
// use a conditional move (it's tuned to cut data dependencies). In this
// case there is a longer parallel chain anyway AND this will be fairly
// unpredictable.
- uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
asm("testl %k2, %k2\n\t"
"cmovzq %1, %0\n\t"
: "+r"(a2)
- : "r"(a3), "r"(xorval));
- *data = a2 >> (shift & (3 * 8));
+ : "r"(a3), "r"(xorval)
+ : "cc");
#endif
+ *data = a2 >> (shift & (3 * 8));
return std::pair<size_t, bool>(matched_bytes, true);
} else {
matched = 8;
s2 += 8;
}
}
+ SNAPPY_PREFETCH(s1 + 64);
+ SNAPPY_PREFETCH(s2 + 64);
// Find out how long the match is. We loop over the data 64 bits at a
// time until we find a 64-bit block that doesn't match; then we find
@@ -194,16 +275,17 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
uint64_t xorval = a1 ^ a2;
int shift = Bits::FindLSBSetNonZero64(xorval);
size_t matched_bytes = shift >> 3;
+ uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
#ifndef __x86_64__
- *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+ a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
#else
- uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
asm("testl %k2, %k2\n\t"
"cmovzq %1, %0\n\t"
: "+r"(a2)
- : "r"(a3), "r"(xorval));
- *data = a2 >> (shift & (3 * 8));
+ : "r"(a3), "r"(xorval)
+ : "cc");
#endif
+ *data = a2 >> (shift & (3 * 8));
matched += matched_bytes;
assert(matched >= 8);
return std::pair<size_t, bool>(matched, false);
@@ -252,6 +334,31 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
}
#endif
+static inline size_t FindMatchLengthPlain(const char* s1, const char* s2,
+ const char* s2_limit) {
+ // Implementation based on the x86-64 version, above.
+ assert(s2_limit >= s2);
+ int matched = 0;
+
+ while (s2 <= s2_limit - 8 &&
+ UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+ s2 += 8;
+ matched += 8;
+ }
+ if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 8) {
+ uint64_t x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero64(x);
+ matched += matching_bits >> 3;
+ s2 += matching_bits >> 3;
+ } else {
+ while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ }
+ }
+ return matched;
+}
+
// Lookup tables for decompression code. Give --snappy_dump_decompression_table
// to the unit test to recompute char_table.
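The new FindMatchLengthPlain follows the same idea as the specialized paths above: compare eight bytes at a time and, at the first mismatch, convert the lowest set bit of the XOR into a byte count. A standalone little-endian sketch of that technique, using GCC/Clang builtins rather than snappy's Bits helpers:

```c++
#include <cstddef>
#include <cstdint>
#include <cstring>

// Length of the common prefix of s1 and s2, scanning at most n bytes.
// Little-endian only: the lowest set bit of (a ^ b) sits in the first
// differing byte, so ctz / 8 counts the matching low-order bytes.
static size_t MatchLength(const char* s1, const char* s2, size_t n) {
  size_t matched = 0;
  while (matched + 8 <= n) {
    uint64_t a, b;
    std::memcpy(&a, s1 + matched, 8);
    std::memcpy(&b, s2 + matched, 8);
    if (a != b) return matched + (__builtin_ctzll(a ^ b) >> 3);
    matched += 8;
  }
  while (matched < n && s1[matched] == s2[matched]) ++matched;
  return matched;
}
```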
diff --git a/other-licenses/snappy/src/snappy-stubs-internal.h b/other-licenses/snappy/src/snappy-stubs-internal.h
index c2a838f38f..526c38b700 100644
--- a/other-licenses/snappy/src/snappy-stubs-internal.h
+++ b/other-licenses/snappy/src/snappy-stubs-internal.h
@@ -31,7 +31,7 @@
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#ifdef HAVE_CONFIG_H
+#if HAVE_CONFIG_H
#include "config.h"
#endif
@@ -43,11 +43,11 @@
#include <limits>
#include <string>
-#ifdef HAVE_SYS_MMAN_H
+#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
-#ifdef HAVE_UNISTD_H
+#if HAVE_UNISTD_H
#include <unistd.h>
#endif
@@ -90,19 +90,25 @@
#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
// Static prediction hints.
-#ifdef HAVE_BUILTIN_EXPECT
+#if HAVE_BUILTIN_EXPECT
#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define SNAPPY_PREDICT_FALSE(x) x
#define SNAPPY_PREDICT_TRUE(x) x
-#endif
+#endif // HAVE_BUILTIN_EXPECT
// Inlining hints.
-#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
+#if HAVE_ATTRIBUTE_ALWAYS_INLINE
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#else
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+#endif // HAVE_ATTRIBUTE_ALWAYS_INLINE
+
+#if HAVE_BUILTIN_PREFETCH
+#define SNAPPY_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 3)
+#else
+#define SNAPPY_PREFETCH(ptr) (void)(ptr)
#endif
// Stubbed version of ABSL_FLAG.
@@ -171,27 +177,42 @@ class LittleEndian {
public:
// Functions to do unaligned loads and stores in little-endian order.
static inline uint16_t Load16(const void *ptr) {
- const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-
// Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
return (static_cast<uint16_t>(buffer[0])) |
(static_cast<uint16_t>(buffer[1]) << 8);
+#else
+ // memcpy() turns into a single instruction early in the optimization
+ // pipeline (relatively to a series of byte accesses). So, using memcpy
+ // instead of byte accesses may lead to better decisions in more stages of
+ // the optimization pipeline.
+ uint16_t value;
+ std::memcpy(&value, ptr, 2);
+ return value;
+#endif
}
static inline uint32_t Load32(const void *ptr) {
- const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-
// Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
return (static_cast<uint32_t>(buffer[0])) |
(static_cast<uint32_t>(buffer[1]) << 8) |
(static_cast<uint32_t>(buffer[2]) << 16) |
(static_cast<uint32_t>(buffer[3]) << 24);
+#else
+ // See Load16() for the rationale of using memcpy().
+ uint32_t value;
+ std::memcpy(&value, ptr, 4);
+ return value;
+#endif
}
static inline uint64_t Load64(const void *ptr) {
- const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-
// Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
return (static_cast<uint64_t>(buffer[0])) |
(static_cast<uint64_t>(buffer[1]) << 8) |
(static_cast<uint64_t>(buffer[2]) << 16) |
@@ -200,30 +221,44 @@ class LittleEndian {
(static_cast<uint64_t>(buffer[5]) << 40) |
(static_cast<uint64_t>(buffer[6]) << 48) |
(static_cast<uint64_t>(buffer[7]) << 56);
+#else
+ // See Load16() for the rationale of using memcpy().
+ uint64_t value;
+ std::memcpy(&value, ptr, 8);
+ return value;
+#endif
}
static inline void Store16(void *dst, uint16_t value) {
- uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
-
// Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
buffer[0] = static_cast<uint8_t>(value);
buffer[1] = static_cast<uint8_t>(value >> 8);
+#else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 2);
+#endif
}
static void Store32(void *dst, uint32_t value) {
- uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
-
// Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
buffer[0] = static_cast<uint8_t>(value);
buffer[1] = static_cast<uint8_t>(value >> 8);
buffer[2] = static_cast<uint8_t>(value >> 16);
buffer[3] = static_cast<uint8_t>(value >> 24);
+#else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 4);
+#endif
}
static void Store64(void* dst, uint64_t value) {
- uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
-
// Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
buffer[0] = static_cast<uint8_t>(value);
buffer[1] = static_cast<uint8_t>(value >> 8);
buffer[2] = static_cast<uint8_t>(value >> 16);
@@ -232,14 +267,18 @@ class LittleEndian {
buffer[5] = static_cast<uint8_t>(value >> 40);
buffer[6] = static_cast<uint8_t>(value >> 48);
buffer[7] = static_cast<uint8_t>(value >> 56);
+#else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 8);
+#endif
}
static inline constexpr bool IsLittleEndian() {
-#if defined(SNAPPY_IS_BIG_ENDIAN)
+#if SNAPPY_IS_BIG_ENDIAN
return false;
#else
return true;
-#endif // defined(SNAPPY_IS_BIG_ENDIAN)
+#endif // SNAPPY_IS_BIG_ENDIAN
}
};
@@ -265,7 +304,7 @@ class Bits {
void operator=(const Bits&);
};
-#if defined(HAVE_BUILTIN_CTZ)
+#if HAVE_BUILTIN_CTZ
inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
@@ -354,7 +393,7 @@ inline int Bits::FindLSBSetNonZero(uint32_t n) {
#endif // End portable versions.
-#if defined(HAVE_BUILTIN_CTZ)
+#if HAVE_BUILTIN_CTZ
inline int Bits::FindLSBSetNonZero64(uint64_t n) {
assert(n != 0);
@@ -388,7 +427,7 @@ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
}
}
-#endif // End portable version.
+#endif // HAVE_BUILTIN_CTZ
// Variable-length integer encoding.
class Varint {
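The Load/Store rewrite above relies on a fixed-size std::memcpy being lowered to a single load or store by current compilers, while avoiding alignment and strict-aliasing pitfalls; the byte-by-byte form is kept only for big-endian builds. The same pattern in isolation:

```c++
#include <cstdint>
#include <cstring>

// Unaligned 32-bit load in host byte order; recent GCC/Clang emit one mov/ldr.
inline uint32_t LoadU32(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

// Unaligned 32-bit store, same idea in the other direction.
inline void StoreU32(void* p, uint32_t v) { std::memcpy(p, &v, sizeof(v)); }
```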
diff --git a/other-licenses/snappy/src/snappy-test.cc b/other-licenses/snappy/src/snappy-test.cc
index 7eb490ac17..aae607210b 100644
--- a/other-licenses/snappy/src/snappy-test.cc
+++ b/other-licenses/snappy/src/snappy-test.cc
@@ -151,7 +151,7 @@ LogMessageCrash::~LogMessageCrash() {
#pragma warning(pop)
#endif
-#ifdef HAVE_LIBZ
+#if HAVE_LIBZ
ZLib::ZLib()
: comp_init_(false),
diff --git a/other-licenses/snappy/src/snappy-test.h b/other-licenses/snappy/src/snappy-test.h
index f80d343377..65f3725744 100644
--- a/other-licenses/snappy/src/snappy-test.h
+++ b/other-licenses/snappy/src/snappy-test.h
@@ -31,25 +31,25 @@
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
-#ifdef HAVE_CONFIG_H
+#if HAVE_CONFIG_H
#include "config.h"
#endif
#include "snappy-stubs-internal.h"
-#ifdef HAVE_SYS_MMAN_H
+#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
-#ifdef HAVE_SYS_RESOURCE_H
+#if HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
-#ifdef HAVE_SYS_TIME_H
+#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
-#ifdef HAVE_WINDOWS_H
+#if HAVE_WINDOWS_H
// Needed to be able to use std::max without workarounds in the source code.
// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
#define NOMINMAX
@@ -58,15 +58,15 @@
#define InitGoogle(argv0, argc, argv, remove_flags) ((void)(0))
-#ifdef HAVE_LIBZ
+#if HAVE_LIBZ
#include "zlib.h"
#endif
-#ifdef HAVE_LIBLZO2
+#if HAVE_LIBLZO2
#include "lzo/lzo1x.h"
#endif
-#ifdef HAVE_LIBLZ4
+#if HAVE_LIBLZ4
#include "lz4.h"
#endif
@@ -216,7 +216,7 @@ class LogMessageVoidify {
#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
#define CHECK_OK(cond) (cond).ok()
-#ifdef HAVE_LIBZ
+#if HAVE_LIBZ
// Object-oriented wrapper around zlib.
class ZLib {
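As in the other headers, the guards here move from #ifdef to #if, which assumes the build always defines the HAVE_* macros to 0 or 1 (e.g. via CMake's #cmakedefine01) instead of leaving them undefined. A small illustration of why the value test is the safer convention under that scheme; the macro name is a placeholder:

```c++
// Generated config headers using the 0/1 convention always define the macro.
#define HAVE_FOO 0

#ifdef HAVE_FOO
// Taken even though the feature is absent: #ifdef only checks definedness.
#endif

#if HAVE_FOO
// Not taken: #if checks the value, which is what the 0/1 convention expects.
#endif
```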
diff --git a/other-licenses/snappy/src/snappy.cc b/other-licenses/snappy/src/snappy.cc
index 57df3f11fc..08c2a9889f 100644
--- a/other-licenses/snappy/src/snappy.cc
+++ b/other-licenses/snappy/src/snappy.cc
@@ -29,18 +29,6 @@
#include "snappy-internal.h"
#include "snappy-sinksource.h"
#include "snappy.h"
-
-#if !defined(SNAPPY_HAVE_SSSE3)
-// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
-// support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
-// defines __AVX__ when AVX support is available.
-#if defined(__SSSE3__) || defined(__AVX__)
-#define SNAPPY_HAVE_SSSE3 1
-#else
-#define SNAPPY_HAVE_SSSE3 0
-#endif
-#endif // !defined(SNAPPY_HAVE_SSSE3)
-
#if !defined(SNAPPY_HAVE_BMI2)
// __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
// specifically, but it does define __AVX2__ when AVX2 support is available.
@@ -56,16 +44,28 @@
#endif
#endif // !defined(SNAPPY_HAVE_BMI2)
-#if SNAPPY_HAVE_SSSE3
-// Please do not replace with <x86intrin.h>. or with headers that assume more
-// advanced SSE versions without checking with all the OWNERS.
-#include <tmmintrin.h>
+#if !defined(SNAPPY_HAVE_X86_CRC32)
+#if defined(__SSE4_2__)
+#define SNAPPY_HAVE_X86_CRC32 1
+#else
+#define SNAPPY_HAVE_X86_CRC32 0
#endif
+#endif // !defined(SNAPPY_HAVE_X86_CRC32)
-#if SNAPPY_HAVE_BMI2
+#if !defined(SNAPPY_HAVE_NEON_CRC32)
+#if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32)
+#define SNAPPY_HAVE_NEON_CRC32 1
+#else
+#define SNAPPY_HAVE_NEON_CRC32 0
+#endif
+#endif // !defined(SNAPPY_HAVE_NEON_CRC32)
+
+#if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32
// Please do not replace with <x86intrin.h>. or with headers that assume more
// advanced SSE versions without checking with all the OWNERS.
#include <immintrin.h>
+#elif SNAPPY_HAVE_NEON_CRC32
+#include <arm_acle.h>
#endif
#include <algorithm>
@@ -74,6 +74,7 @@
#include <cstdint>
#include <cstdio>
#include <cstring>
+#include <memory>
#include <string>
#include <utility>
#include <vector>
@@ -91,6 +92,14 @@ using internal::COPY_2_BYTE_OFFSET;
using internal::COPY_4_BYTE_OFFSET;
using internal::kMaximumTagLength;
using internal::LITERAL;
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+using internal::V128;
+using internal::V128_Load;
+using internal::V128_LoadU;
+using internal::V128_Shuffle;
+using internal::V128_StoreU;
+using internal::V128_DupChar;
+#endif
// We translate the information encoded in a tag through a lookup table to a
// format that requires fewer instructions to decode. Effectively we store
@@ -133,21 +142,53 @@ constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
}
-// We maximally co-locate the two tables so that only one register needs to be
-// reserved for the table address.
-struct {
- alignas(64) const std::array<int16_t, 256> length_minus_offset;
- uint32_t extract_masks[4]; // Used for extracting offset based on tag type.
-} table = {MakeTable(make_index_sequence<256>{}), {0, 0xFF, 0xFFFF, 0}};
-
-// Any hash function will produce a valid compressed bitstream, but a good
-// hash function reduces the number of collisions and thus yields better
-// compression for compressible input, and more speed for incompressible
-// input. Of course, it doesn't hurt if the hash function is reasonably fast
-// either, as it gets called a lot.
-inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) {
+alignas(64) const std::array<int16_t, 256> kLengthMinusOffset =
+ MakeTable(make_index_sequence<256>{});
+
+// Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to the
+// relevant entry, if any, for the given bytes. Any hash function will do,
+// but a good hash function reduces the number of collisions and thus yields
+// better compression for compressible input.
+//
+// REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two.
+inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) {
+ // Our choice is quicker-and-dirtier than the typical hash function;
+ // empirically, that seems beneficial. The upper bits of kMagic * bytes are a
+ // higher-quality hash than the lower bits, so when using kMagic * bytes we
+ // also shift right to get a higher-quality end result. There's no similar
+ // issue with a CRC because all of the output bits of a CRC are equally good
+ // "hashes." So, a CPU instruction for CRC, if available, tends to be a good
+ // choice.
+#if SNAPPY_HAVE_NEON_CRC32
+ // We use mask as the second arg to the CRC function, as it's about to
+ // be used anyway; it'd be equally correct to use 0 or some constant.
+ // Mathematically, _mm_crc32_u32 (or similar) is a function of the
+ // xor of its arguments.
+ const uint32_t hash = __crc32cw(bytes, mask);
+#elif SNAPPY_HAVE_X86_CRC32
+ const uint32_t hash = _mm_crc32_u32(bytes, mask);
+#else
constexpr uint32_t kMagic = 0x1e35a7bd;
- return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask;
+ const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits);
+#endif
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
+ (hash & mask));
+}
+
+inline uint16_t* TableEntry4ByteMatch(uint16_t* table, uint32_t bytes,
+ uint32_t mask) {
+ constexpr uint32_t kMagic = 2654435761U;
+ const uint32_t hash = (kMagic * bytes) >> (32 - kMaxHashTableBits);
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
+ (hash & mask));
+}
+
+inline uint16_t* TableEntry8ByteMatch(uint16_t* table, uint64_t bytes,
+ uint32_t mask) {
+ constexpr uint64_t kMagic = 58295818150454627ULL;
+ const uint32_t hash = (kMagic * bytes) >> (64 - kMaxHashTableBits);
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
+ (hash & mask));
}
} // namespace
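The TableEntry helpers above hand back a pointer instead of an index, and (as the CompressFragment changes further down show) callers now pass mask = 2 * (table_size - 1), so the masked hash is used directly as a byte offset into the uint16_t table. A sketch of the portable (non-CRC32C) variant, with the table-size constant spelled out as an assumption:

```c++
#include <cstdint>

// Illustrative stand-in for snappy's kMaxHashTableBits.
constexpr int kTableBits = 14;

inline uint16_t* Entry(uint16_t* table, uint32_t bytes, uint32_t mask) {
  // mask == 2 * (table_size - 1) has its low bit clear, so (hash & mask) is an
  // even byte offset that always lands on a uint16_t slot. Shifting by
  // 31 - kTableBits instead of 32 - kTableBits keeps one extra low bit, so the
  // same high-quality top bits of the product pick the slot after the
  // *2 (element size) scaling.
  constexpr uint32_t kMagic = 0x1e35a7bd;
  const uint32_t hash = (kMagic * bytes) >> (31 - kTableBits);
  return reinterpret_cast<uint16_t*>(reinterpret_cast<char*>(table) +
                                     (hash & mask));
}
```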
@@ -228,7 +269,7 @@ inline char* IncrementalCopySlow(const char* src, char* op,
return op_limit;
}
-#if SNAPPY_HAVE_SSSE3
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
// Computes the bytes for shuffle control mask (please read comments on
// 'pattern_generation_masks' as well) for the given index_offset and
@@ -248,19 +289,19 @@ inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
// Computes the shuffle control mask bytes array for given pattern-sizes and
// returns an array.
template <size_t... pattern_sizes_minus_one>
-inline constexpr std::array<std::array<char, sizeof(__m128i)>,
+inline constexpr std::array<std::array<char, sizeof(V128)>,
sizeof...(pattern_sizes_minus_one)>
MakePatternMaskBytesTable(int index_offset,
index_sequence<pattern_sizes_minus_one...>) {
- return {MakePatternMaskBytes(
- index_offset, pattern_sizes_minus_one + 1,
- make_index_sequence</*indexes=*/sizeof(__m128i)>())...};
+ return {
+ MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1,
+ make_index_sequence</*indexes=*/sizeof(V128)>())...};
}
// This is an array of shuffle control masks that can be used as the source
// operand for PSHUFB to permute the contents of the destination XMM register
// into a repeating byte pattern.
-alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
+alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
16> pattern_generation_masks =
MakePatternMaskBytesTable(
/*index_offset=*/0,
@@ -271,40 +312,40 @@ alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
// Basically, pattern_reshuffle_masks is a continuation of
// pattern_generation_masks. It follows that, pattern_reshuffle_masks is same as
// pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
-alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
+alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
16> pattern_reshuffle_masks =
MakePatternMaskBytesTable(
/*index_offset=*/16,
/*pattern_sizes_minus_one=*/make_index_sequence<16>());
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
-static inline __m128i LoadPattern(const char* src, const size_t pattern_size) {
- __m128i generation_mask = _mm_load_si128(reinterpret_cast<const __m128i*>(
+static inline V128 LoadPattern(const char* src, const size_t pattern_size) {
+ V128 generation_mask = V128_Load(reinterpret_cast<const V128*>(
pattern_generation_masks[pattern_size - 1].data()));
// Uninitialized bytes are masked out by the shuffle mask.
// TODO: remove annotation and macro defs once MSan is fixed.
SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
- return _mm_shuffle_epi8(
- _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)), generation_mask);
+ return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)),
+ generation_mask);
}
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
-static inline std::pair<__m128i /* pattern */, __m128i /* reshuffle_mask */>
+static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */>
LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
- __m128i pattern = LoadPattern(src, pattern_size);
+ V128 pattern = LoadPattern(src, pattern_size);
// This mask will generate the next 16 bytes in-place. Doing so enables us to
- // write data by at most 4 _mm_storeu_si128.
+ // write data by at most 4 V128_StoreU.
//
// For example, suppose pattern is: abcdefabcdefabcd
// Shuffling with this mask will generate: efabcdefabcdefab
// Shuffling again will generate: cdefabcdefabcdef
- __m128i reshuffle_mask = _mm_load_si128(reinterpret_cast<const __m128i*>(
+ V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>(
pattern_reshuffle_masks[pattern_size - 1].data()));
return {pattern, reshuffle_mask};
}
-#endif // SNAPPY_HAVE_SSSE3
+#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
// Fallback for when we need to copy while extending the pattern, for example
// copying 10 bytes from 3 positions back abc -> abcabcabcabca.
@@ -312,33 +353,38 @@ LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
// REQUIRES: [dst - offset, dst + 64) is a valid address range.
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
-#if SNAPPY_HAVE_SSSE3
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
switch (offset) {
case 0:
return false;
case 1: {
- std::memset(dst, dst[-1], 64);
+ // TODO: Ideally we should memset, move back once the
+ // codegen issues are fixed.
+ V128 pattern = V128_DupChar(dst[-1]);
+ for (int i = 0; i < 4; i++) {
+ V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+ }
return true;
}
case 2:
case 4:
case 8:
case 16: {
- __m128i pattern = LoadPattern(dst - offset, offset);
+ V128 pattern = LoadPattern(dst - offset, offset);
for (int i = 0; i < 4; i++) {
- _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern);
+ V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
}
return true;
}
default: {
auto pattern_and_reshuffle_mask =
LoadPatternAndReshuffleMask(dst - offset, offset);
- __m128i pattern = pattern_and_reshuffle_mask.first;
- __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
+ V128 pattern = pattern_and_reshuffle_mask.first;
+ V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
for (int i = 0; i < 4; i++) {
- _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern);
- pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
}
return true;
}
@@ -348,6 +394,7 @@ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
if (SNAPPY_PREDICT_TRUE(offset < 16)) {
if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
// Extend the pattern to the first 16 bytes.
+ // The simpler formulation of `dst[i - offset]` induces undefined behavior.
for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i];
// Find a multiple of pattern >= 16.
static std::array<uint8_t, 16> pattern_sizes = []() {
@@ -361,7 +408,7 @@ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
}
return true;
}
-#endif // SNAPPY_HAVE_SSSE3
+#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
// Very rare.
for (int i = 0; i < 4; i++) {
@@ -375,7 +422,7 @@ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
// region of the buffer.
inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
char* const buf_limit) {
-#if SNAPPY_HAVE_SSSE3
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
constexpr int big_pattern_size_lower_bound = 16;
#else
constexpr int big_pattern_size_lower_bound = 8;
@@ -425,14 +472,14 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
// bytes.
if (pattern_size < big_pattern_size_lower_bound) {
-#if SNAPPY_HAVE_SSSE3
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
// Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
// to permute the register's contents in-place into a repeating sequence of
// the first "pattern_size" bytes.
// For example, suppose:
// src == "abc"
// op == op + 3
- // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc"
+ // After V128_Shuffle(), "pattern" will have five copies of "abc"
// followed by one byte of slop: abcabcabcabcabca.
//
// The non-SSE fallback implementation suffers from store-forwarding stalls
@@ -444,26 +491,26 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
auto pattern_and_reshuffle_mask =
LoadPatternAndReshuffleMask(src, pattern_size);
- __m128i pattern = pattern_and_reshuffle_mask.first;
- __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
+ V128 pattern = pattern_and_reshuffle_mask.first;
+ V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
// There is at least one, and at most four 16-byte blocks. Writing four
// conditionals instead of a loop allows FDO to layout the code with
// respect to the actual probabilities of each length.
// TODO: Replace with loop with trip count hint.
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
+ V128_StoreU(reinterpret_cast<V128*>(op), pattern);
if (op + 16 < op_limit) {
- pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 16), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern);
}
if (op + 32 < op_limit) {
- pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 32), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern);
}
if (op + 48 < op_limit) {
- pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 48), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern);
}
return op_limit;
}
@@ -471,8 +518,8 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
if (SNAPPY_PREDICT_TRUE(op < op_end)) {
auto pattern_and_reshuffle_mask =
LoadPatternAndReshuffleMask(src, pattern_size);
- __m128i pattern = pattern_and_reshuffle_mask.first;
- __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
+ V128 pattern = pattern_and_reshuffle_mask.first;
+ V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
// This code path is relatively cold however so we save code size
// by avoiding unrolling and vectorizing.
@@ -483,13 +530,13 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
#pragma clang loop unroll(disable)
#endif
do {
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
- pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
op += 16;
} while (SNAPPY_PREDICT_TRUE(op < op_end));
}
return IncrementalCopySlow(op - pattern_size, op, op_limit);
-#else // !SNAPPY_HAVE_SSSE3
+#else // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
// If plenty of buffer space remains, expand the pattern to at least 8
// bytes. The way the following loop is written, we need 8 bytes of buffer
// space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
@@ -506,7 +553,7 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
} else {
return IncrementalCopySlow(src, op, op_limit);
}
-#endif // SNAPPY_HAVE_SSSE3
+#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
}
assert(pattern_size >= big_pattern_size_lower_bound);
constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;
@@ -599,7 +646,19 @@ static inline char* EmitLiteral(char* op, const char* literal, int len) {
LittleEndian::Store32(op, n);
op += count;
}
- std::memcpy(op, literal, len);
+ // When allow_fast_path is true, we can overwrite up to 16 bytes.
+ if (allow_fast_path) {
+ char* destination = op;
+ const char* source = literal;
+ const char* end = destination + len;
+ do {
+ std::memcpy(destination, source, 16);
+ destination += 16;
+ source += 16;
+ } while (destination < end);
+ } else {
+ std::memcpy(op, literal, len);
+ }
return op + len;
}
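The allow_fast_path branch above trades an exact-length copy for fixed 16-byte memcpy chunks, which compile to full-width vector moves; the price is that up to 15 bytes beyond the literal may be read and written, so callers must guarantee that much slack on both sides. The pattern on its own:

```c++
#include <cstring>

// Copies len bytes in 16-byte chunks. Assumes len > 0 and that both buffers
// extend at least 15 bytes past the nominal end, since the last chunk may
// read and write past src + len / dst + len.
inline void CopyWithSlop(char* dst, const char* src, int len) {
  const char* const end = dst + len;
  do {
    std::memcpy(dst, src, 16);
    dst += 16;
    src += 16;
  } while (dst < end);
}
```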
@@ -734,7 +793,7 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
const char* ip = input;
assert(input_size <= kBlockSize);
assert((table_size & (table_size - 1)) == 0); // table must be power of two
- const uint32_t mask = table_size - 1;
+ const uint32_t mask = 2 * (table_size - 1);
const char* ip_end = input + input_size;
const char* base_ip = ip;
@@ -785,11 +844,11 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
// loaded in preload.
uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
assert(dword == LittleEndian::Load32(ip + i));
- uint32_t hash = HashBytes(dword, mask);
- candidate = base_ip + table[hash];
+ uint16_t* table_entry = TableEntry(table, dword, mask);
+ candidate = base_ip + *table_entry;
assert(candidate >= base_ip);
assert(candidate < ip + i);
- table[hash] = delta + i;
+ *table_entry = delta + i;
if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
*op = LITERAL | (i << 2);
UnalignedCopy128(next_emit, op + 1);
@@ -806,7 +865,7 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
}
while (true) {
assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
- uint32_t hash = HashBytes(data, mask);
+ uint16_t* table_entry = TableEntry(table, data, mask);
uint32_t bytes_between_hash_lookups = skip >> 5;
skip += bytes_between_hash_lookups;
const char* next_ip = ip + bytes_between_hash_lookups;
@@ -814,11 +873,11 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
ip = next_emit;
goto emit_remainder;
}
- candidate = base_ip + table[hash];
+ candidate = base_ip + *table_entry;
assert(candidate >= base_ip);
assert(candidate < ip);
- table[hash] = ip - base_ip;
+ *table_entry = ip - base_ip;
if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
LittleEndian::Load32(candidate))) {
break;
@@ -864,12 +923,13 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
assert((data & 0xFFFFFFFFFF) ==
(LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
// We are now looking for a 4-byte match again. We read
- // table[Hash(ip, shift)] for that. To improve compression,
+ // table[Hash(ip, mask)] for that. To improve compression,
// we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
- table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1;
- uint32_t hash = HashBytes(data, mask);
- candidate = base_ip + table[hash];
- table[hash] = ip - base_ip;
+ *TableEntry(table, LittleEndian::Load32(ip - 1), mask) =
+ ip - base_ip - 1;
+ uint16_t* table_entry = TableEntry(table, data, mask);
+ candidate = base_ip + *table_entry;
+ *table_entry = ip - base_ip;
// Measurements on the benchmarks have shown the following probabilities
// for the loop to exit (ie. avg. number of iterations is reciprocal).
// BM_Flat/6 txt1 p = 0.3-0.4
@@ -895,12 +955,180 @@ emit_remainder:
return op;
}
+
+char* CompressFragmentDoubleHash(const char* input, size_t input_size, char* op,
+ uint16_t* table, const int table_size,
+ uint16_t* table2, const int table_size2) {
+ (void)table_size2;
+ assert(table_size == table_size2);
+ // "ip" is the input pointer, and "op" is the output pointer.
+ const char* ip = input;
+ assert(input_size <= kBlockSize);
+ assert((table_size & (table_size - 1)) == 0); // table must be power of two
+ const uint32_t mask = 2 * (table_size - 1);
+ const char* ip_end = input + input_size;
+ const char* base_ip = ip;
+
+ const size_t kInputMarginBytes = 15;
+ if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+ const char* ip_limit = input + input_size - kInputMarginBytes;
+
+ for (;;) {
+ const char* next_emit = ip++;
+ uint64_t data = LittleEndian::Load64(ip);
+ uint32_t skip = 512;
+
+ const char* candidate;
+ uint32_t candidate_length;
+ while (true) {
+ assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
+ uint16_t* table_entry2 = TableEntry8ByteMatch(table2, data, mask);
+ uint32_t bytes_between_hash_lookups = skip >> 9;
+ skip++;
+ const char* next_ip = ip + bytes_between_hash_lookups;
+ if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
+ ip = next_emit;
+ goto emit_remainder;
+ }
+ candidate = base_ip + *table_entry2;
+ assert(candidate >= base_ip);
+ assert(candidate < ip);
+
+ *table_entry2 = ip - base_ip;
+ if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
+ LittleEndian::Load32(candidate))) {
+ candidate_length =
+ FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
+ break;
+ }
+
+ uint16_t* table_entry = TableEntry4ByteMatch(table, data, mask);
+ candidate = base_ip + *table_entry;
+ assert(candidate >= base_ip);
+ assert(candidate < ip);
+
+ *table_entry = ip - base_ip;
+ if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
+ LittleEndian::Load32(candidate))) {
+ candidate_length =
+ FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
+ table_entry2 =
+ TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 1), mask);
+ auto candidate2 = base_ip + *table_entry2;
+ size_t candidate_length2 =
+ FindMatchLengthPlain(candidate2, ip + 1, ip_end);
+ if (candidate_length2 > candidate_length) {
+ *table_entry2 = ip - base_ip;
+ candidate = candidate2;
+ candidate_length = candidate_length2;
+ ++ip;
+ }
+ break;
+ }
+ data = LittleEndian::Load64(next_ip);
+ ip = next_ip;
+ }
+ // Backtrack to the point it matches fully.
+ while (ip > next_emit && candidate > base_ip &&
+ *(ip - 1) == *(candidate - 1)) {
+ --ip;
+ --candidate;
+ ++candidate_length;
+ }
+ *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 1), mask) =
+ ip - base_ip + 1;
+ *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 2), mask) =
+ ip - base_ip + 2;
+ *TableEntry4ByteMatch(table, LittleEndian::Load32(ip + 1), mask) =
+ ip - base_ip + 1;
+ // Step 2: A 4-byte or 8-byte match has been found.
+ // We'll later see if more than 4 bytes match. But, prior to the match,
+ // input bytes [next_emit, ip) are unmatched. Emit them as
+ // "literal bytes."
+ assert(next_emit + 16 <= ip_end);
+ if (ip - next_emit > 0) {
+ op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit,
+ ip - next_emit);
+ }
+ // Step 3: Call EmitCopy, and then see if another EmitCopy could
+ // be our next move. Repeat until we find no match for the
+ // input immediately after what was consumed by the last EmitCopy call.
+ //
+ // If we exit this loop normally then we need to call EmitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can exit
+ // this loop via goto if we get close to exhausting the input.
+ do {
+ // We have a 4-byte match at ip, and no need to emit any
+ // "literal bytes" prior to ip.
+ const char* base = ip;
+ ip += candidate_length;
+ size_t offset = base - candidate;
+ if (candidate_length < 12) {
+ op =
+ EmitCopy</*len_less_than_12=*/true>(op, offset, candidate_length);
+ } else {
+ op = EmitCopy</*len_less_than_12=*/false>(op, offset,
+ candidate_length);
+ }
+ if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
+ goto emit_remainder;
+ }
+ // We are now looking for a 4-byte match again. We read
+ // table[Hash(ip, mask)] for that. To improve compression,
+ // we also update several previous table entries.
+ if (ip - base_ip > 7) {
+ *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 7), mask) =
+ ip - base_ip - 7;
+ *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 4), mask) =
+ ip - base_ip - 4;
+ }
+ *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 3), mask) =
+ ip - base_ip - 3;
+ *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 2), mask) =
+ ip - base_ip - 2;
+ *TableEntry4ByteMatch(table, LittleEndian::Load32(ip - 2), mask) =
+ ip - base_ip - 2;
+ *TableEntry4ByteMatch(table, LittleEndian::Load32(ip - 1), mask) =
+ ip - base_ip - 1;
+
+ uint16_t* table_entry =
+ TableEntry8ByteMatch(table2, LittleEndian::Load64(ip), mask);
+ candidate = base_ip + *table_entry;
+ *table_entry = ip - base_ip;
+ if (LittleEndian::Load32(ip) == LittleEndian::Load32(candidate)) {
+ candidate_length =
+ FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
+ continue;
+ }
+ table_entry =
+ TableEntry4ByteMatch(table, LittleEndian::Load32(ip), mask);
+ candidate = base_ip + *table_entry;
+ *table_entry = ip - base_ip;
+ if (LittleEndian::Load32(ip) == LittleEndian::Load32(candidate)) {
+ candidate_length =
+ FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
+ continue;
+ }
+ break;
+ } while (true);
+ }
+ }
+
+emit_remainder:
+ // Emit the remaining bytes as a literal
+ if (ip < ip_end) {
+ op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
+ }
+
+ return op;
+}
} // end namespace internal
-// Called back at avery compression call to trace parameters and sizes.
-static inline void Report(const char *algorithm, size_t compressed_size,
- size_t uncompressed_size) {
+static inline void Report(int token, const char *algorithm, size_t
+compressed_size, size_t uncompressed_size) {
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)token;
(void)algorithm;
(void)compressed_size;
(void)uncompressed_size;
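One step worth noting in CompressFragmentDoubleHash is the backtracking loop right after a candidate is accepted: the match is grown backwards over equal bytes that would otherwise be emitted as literals. That step in isolation (names are illustrative):

```c++
#include <cstddef>

// Extend a match backwards while the preceding bytes are equal, without
// walking past the start of the block (base_ip) or into bytes already
// emitted (next_emit).
inline void ExtendMatchBackwards(const char* base_ip, const char* next_emit,
                                 const char*& ip, const char*& candidate,
                                 size_t& length) {
  while (ip > next_emit && candidate > base_ip && ip[-1] == candidate[-1]) {
    --ip;
    --candidate;
    ++length;
  }
}
```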
@@ -962,7 +1190,7 @@ static inline void Report(const char *algorithm, size_t compressed_size,
// bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
// };
-static inline uint32_t ExtractLowBytes(uint32_t v, int n) {
+static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) {
assert(n >= 0);
assert(n <= 4);
#if SNAPPY_HAVE_BMI2
@@ -991,30 +1219,87 @@ inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
return offset != 0;
}
-void MemCopy(char* dst, const uint8_t* src, size_t size) {
- std::memcpy(dst, src, size);
+// Copies between size bytes and 64 bytes from src to dest. size cannot exceed
+// 64. More than size bytes, but never exceeding 64, might be copied if doing
+// so gives better performance. [src, src + size) must not overlap with
+// [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64).
+void MemCopy64(char* dst, const void* src, size_t size) {
+ // Always copy this many bytes. If that's below size then copy the full 64.
+ constexpr int kShortMemCopy = 32;
+
+ assert(size <= 64);
+ assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size,
+ dst) ||
+ std::less_equal<const void*>()(dst + size, src));
+
+ // We know that src and dst are at least size bytes apart. However, because we
+ // might copy more than size bytes the copy still might overlap past size.
+ // E.g. if src and dst appear consecutively in memory (src + size >= dst).
+ // TODO: Investigate wider copies on other platforms.
+#if defined(__x86_64__) && defined(__AVX__)
+ assert(kShortMemCopy <= 32);
+ __m256i data = _mm256_lddqu_si256(static_cast<const __m256i *>(src));
+ _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data);
+ // Profiling shows that nearly all copies are short.
+ if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
+ data = _mm256_lddqu_si256(static_cast<const __m256i *>(src) + 1);
+ _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data);
+ }
+#else
+ std::memmove(dst, src, kShortMemCopy);
+ // Profiling shows that nearly all copies are short.
+ if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
+ std::memmove(dst + kShortMemCopy,
+ static_cast<const uint8_t*>(src) + kShortMemCopy,
+ 64 - kShortMemCopy);
+ }
+#endif
}
-void MemCopy(ptrdiff_t dst, const uint8_t* src, size_t size) {
+void MemCopy64(ptrdiff_t dst, const void* src, size_t size) {
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
(void)dst;
(void)src;
(void)size;
}
-void MemMove(char* dst, const void* src, size_t size) {
- std::memmove(dst, src, size);
+void ClearDeferred(const void** deferred_src, size_t* deferred_length,
+ uint8_t* safe_source) {
+ *deferred_src = safe_source;
+ *deferred_length = 0;
}
-void MemMove(ptrdiff_t dst, const void* src, size_t size) {
- // TODO: Switch to [[maybe_unused]] when we can assume C++17.
- (void)dst;
- (void)src;
- (void)size;
+void DeferMemCopy(const void** deferred_src, size_t* deferred_length,
+ const void* src, size_t length) {
+ *deferred_src = src;
+ *deferred_length = length;
}
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
-size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) {
+inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) {
+ const uint8_t*& ip = *ip_p;
+ // This section is crucial for the throughput of the decompression loop.
+ // The latency of an iteration is fundamentally constrained by the
+ // following data chain on ip.
+ // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2
+ // delta2 = ((c >> 2) + 1) ip++
+ // This is different from X86 optimizations because ARM has conditional add
+ // instruction (csinc) and it removes several register moves.
+ const size_t tag_type = *tag & 3;
+ const bool is_literal = (tag_type == 0);
+ if (is_literal) {
+ size_t next_literal_tag = (*tag >> 2) + 1;
+ *tag = ip[next_literal_tag];
+ ip += next_literal_tag + 1;
+ } else {
+ *tag = ip[tag_type];
+ ip += tag_type + 1;
+ }
+ return tag_type;
+}
+
+SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
const uint8_t*& ip = *ip_p;
// This section is crucial for the throughput of the decompression loop.
// The latency of an iteration is fundamentally constrained by the
@@ -1026,11 +1311,12 @@ size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) {
size_t literal_len = *tag >> 2;
size_t tag_type = *tag;
bool is_literal;
-#if defined(__GNUC__) && defined(__x86_64__) && defined(__GCC_ASM_FLAG_OUTPUTS__)
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
// TODO clang misses the fact that the (c & 3) already correctly
// sets the zero flag.
asm("and $3, %k[tag_type]\n\t"
- : [tag_type] "+r"(tag_type), "=@ccz"(is_literal));
+ : [tag_type] "+r"(tag_type), "=@ccz"(is_literal)
+ :: "cc");
#else
tag_type &= 3;
is_literal = (tag_type == 0);
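Both AdvanceToNextTag variants key everything off the low two bits of the tag byte: 0 marks a literal whose upper six bits hold (length - 1) for short literals, 1 and 2 mark copies trailed by a 1- or 2-byte offset, and 3 (copy with 4-byte offset) is routed to the exceptional path. A sketch of just that classification, which is the test the inline asm above is helping the compiler fold:

```c++
#include <cstdint>

// Classify one snappy tag byte.
struct TagInfo {
  unsigned type;         // 0 = literal, 1/2 = copy with 1- or 2-byte offset,
                         // 3 = copy with 4-byte offset.
  unsigned literal_len;  // Length when type == 0 and the value is <= 60;
                         // larger values mean the length follows in extra bytes.
};

inline TagInfo ClassifyTag(uint8_t tag) {
  return TagInfo{static_cast<unsigned>(tag & 3),
                 static_cast<unsigned>((tag >> 2) + 1)};
}
```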
@@ -1060,7 +1346,24 @@ size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) {
// Extract the offset for copy-1 and copy-2 returns 0 for literals or copy-4.
inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
- return val & table.extract_masks[tag_type];
+ // For x86 non-static storage works better. For ARM static storage is better.
+ // TODO: Once the array is recognized as a register, improve the
+ // readability for x86.
+#if defined(__x86_64__)
+ constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
+ uint16_t result;
+ memcpy(&result,
+ reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type,
+ sizeof(result));
+ return val & result;
+#elif defined(__aarch64__)
+ constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
+ return val & static_cast<uint32_t>(
+ (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF);
+#else
+ static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0};
+ return val & kExtractMasks[tag_type];
+#endif
};
// Core decompression loop, when there is enough data available.
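The x86 and aarch64 branches above pack the old extract_masks array into a single 64-bit constant; shifting by 16 * tag_type recovers {0, 0xFF, 0xFFFF, 0}. A compile-time check of that packing (not part of the source):

```c++
#include <cstdint>

constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;

constexpr uint32_t MaskFor(unsigned tag_type) {
  return static_cast<uint32_t>((kExtractMasksCombined >> (tag_type * 16)) &
                               0xFFFF);
}

static_assert(MaskFor(0) == 0x0000, "literal");
static_assert(MaskFor(1) == 0x00FF, "copy, 1-byte offset");
static_assert(MaskFor(2) == 0xFFFF, "copy, 2-byte offset");
static_assert(MaskFor(3) == 0x0000, "copy, 4-byte offset");
```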
@@ -1076,6 +1379,12 @@ template <typename T>
std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
ptrdiff_t op_limit_min_slop) {
+ // If deferred_src is invalid point it here.
+ uint8_t safe_source[64];
+ const void* deferred_src;
+ size_t deferred_length;
+ ClearDeferred(&deferred_src, &deferred_length, safe_source);
+
// We unroll the inner loop twice so we need twice the spare room.
op_limit_min_slop -= kSlopBytes;
if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
@@ -1084,21 +1393,41 @@ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
// ip points just past the tag and we are touching at maximum kSlopBytes
// in an iteration.
size_t tag = ip[-1];
+#if defined(__clang__) && defined(__aarch64__)
+ // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317
+ // when loading 1 byte, clang for aarch64 doesn't realize that it (ldrb)
+ // comes with free zero-extension, so clang generates another
+ // 'and xn, xm, 0xff' before it uses that as the offset. This 'and' is
+ // redundant and can be removed by adding this dummy asm, which gives
+ // clang a hint that we're doing the zero-extension at the load.
+ asm("" ::"r"(tag));
+#endif
do {
// The throughput is limited by instructions, unrolling the inner loop
// twice reduces the amount of instructions checking limits and also
// leads to reduced mov's.
+
+ SNAPPY_PREFETCH(ip + 128);
for (int i = 0; i < 2; i++) {
const uint8_t* old_ip = ip;
assert(tag == ip[-1]);
// For literals tag_type = 0, hence we will always obtain 0 from
// ExtractLowBytes. For literals offset will thus be kLiteralOffset.
- ptrdiff_t len_min_offset = table.length_minus_offset[tag];
- size_t tag_type = AdvanceToNextTag(&ip, &tag);
- uint32_t next = LittleEndian::Load32(old_ip);
- size_t len = len_min_offset & 0xFF;
- len_min_offset -= ExtractOffset(next, tag_type);
- if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) {
+ ptrdiff_t len_minus_offset = kLengthMinusOffset[tag];
+ uint32_t next;
+#if defined(__aarch64__)
+ size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag);
+ // We never need more than 16 bits. Doing a Load16 allows the compiler
+ // to elide the masking operation in ExtractOffset.
+ next = LittleEndian::Load16(old_ip);
+#else
+ size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag);
+ next = LittleEndian::Load32(old_ip);
+#endif
+ size_t len = len_minus_offset & 0xFF;
+ ptrdiff_t extracted = ExtractOffset(next, tag_type);
+ ptrdiff_t len_min_offset = len_minus_offset - extracted;
+ if (SNAPPY_PREDICT_FALSE(len_minus_offset > extracted)) {
if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
// Exceptional case (long literal or copy 4).
// Actually doing the copy here is negatively impacting the main
@@ -1110,39 +1439,29 @@ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
}
// Only copy-1 or copy-2 tags can get here.
assert(tag_type == 1 || tag_type == 2);
- std::ptrdiff_t delta = op + len_min_offset - len;
+ std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
// Guard against copies before the buffer start.
+ // Execute any deferred MemCopy since we write to dst here.
+ MemCopy64(op_base + op, deferred_src, deferred_length);
+ op += deferred_length;
+ ClearDeferred(&deferred_src, &deferred_length, safe_source);
if (SNAPPY_PREDICT_FALSE(delta < 0 ||
!Copy64BytesWithPatternExtension(
op_base + op, len - len_min_offset))) {
goto break_loop;
}
+ // We aren't deferring this copy so add length right away.
op += len;
continue;
}
- std::ptrdiff_t delta = op + len_min_offset - len;
+ std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
if (SNAPPY_PREDICT_FALSE(delta < 0)) {
-#if defined(__GNUC__) && defined(__x86_64__)
- // TODO
- // When validating, both code path reduced to `op += len`. Ie. this
- // becomes effectively
- //
- // if (delta < 0) if (tag_type != 0) goto break_loop;
- // op += len;
- //
- // The compiler interchanges the predictable and almost always false
- // first if-statement with the completely unpredictable second
- // if-statement, putting an unpredictable branch on every iteration.
- // This empty asm is worth almost 2x, which I think qualifies for an
- // award for the most load-bearing empty statement.
- asm("");
-#endif
-
// Due to the spurious offset in literals have this will trigger
// at the start of a block when op is still smaller than 256.
if (tag_type != 0) goto break_loop;
- MemCopy(op_base + op, old_ip, 64);
- op += len;
+ MemCopy64(op_base + op, deferred_src, deferred_length);
+ op += deferred_length;
+ DeferMemCopy(&deferred_src, &deferred_length, old_ip, len);
continue;
}
@@ -1150,14 +1469,23 @@ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
// we need to copy from ip instead of from the stream.
const void* from =
tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
- MemMove(op_base + op, from, 64);
- op += len;
+ MemCopy64(op_base + op, deferred_src, deferred_length);
+ op += deferred_length;
+ DeferMemCopy(&deferred_src, &deferred_length, from, len);
}
- } while (ip < ip_limit_min_slop && op < op_limit_min_slop);
+ } while (ip < ip_limit_min_slop &&
+ static_cast<ptrdiff_t>(op + deferred_length) < op_limit_min_slop);
exit:
ip--;
assert(ip <= ip_limit);
}
+ // If we deferred a copy then we can perform it now. If we are up to date then
+ // we might not have enough slop bytes and could run past the end.
+ if (deferred_length) {
+ MemCopy64(op_base + op, deferred_src, deferred_length);
+ op += deferred_length;
+ ClearDeferred(&deferred_src, &deferred_length, safe_source);
+ }
return {ip, op};
}
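
The deferred-copy machinery added above (safe_source, DeferMemCopy, the MemCopy64 flushes) follows a simple pattern: each iteration first flushes whatever the previous iteration queued, then queues its own copy, and one final flush runs after the loop. A minimal sketch of that pattern, with the fixed 64-byte copies and pattern extension deliberately left out:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Deferred {
      const void* src;
      size_t len;
    };

    // Flush the pending copy, then record the new one instead of copying it now.
    inline void EmitCopy(uint8_t* dst, size_t& op, Deferred& d,
                         const void* src, size_t len) {
      std::memcpy(dst + op, d.src, d.len);  // stands in for MemCopy64
      op += d.len;
      d = {src, len};
    }

    int main() {
      uint8_t out[32] = {};
      uint8_t scratch[8] = {};              // plays the role of safe_source
      size_t op = 0;
      Deferred d{scratch, 0};               // "cleared" state points at scratch
      const char a[] = "abcd", b[] = "efgh";
      EmitCopy(out, op, d, a, 4);           // nothing to flush yet (length 0)
      EmitCopy(out, op, d, b, 4);           // flushes "abcd", defers "efgh"
      std::memcpy(out + op, d.src, d.len);  // tail flush, as after the main loop
      op += d.len;
      return op == 8 ? 0 : 1;
    }
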
@@ -1325,7 +1653,7 @@ class SnappyDecompressor {
if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
} else {
- const ptrdiff_t entry = table.length_minus_offset[c];
+ const ptrdiff_t entry = kLengthMinusOffset[c];
preload = LittleEndian::Load32(ip);
const uint32_t trailer = ExtractLowBytes(preload, c & 3);
const uint32_t length = entry & 0xff;
@@ -1448,7 +1776,8 @@ template <typename Writer>
static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
Writer* writer, uint32_t compressed_len,
uint32_t uncompressed_len) {
- Report("snappy_uncompress", compressed_len, uncompressed_len);
+ int token = 0;
+ Report(token, "snappy_uncompress", compressed_len, uncompressed_len);
writer->SetExpectedLength(uncompressed_len);
@@ -1463,7 +1792,9 @@ bool GetUncompressedLength(Source* source, uint32_t* result) {
return decompressor.ReadUncompressedLength(result);
}
-size_t Compress(Source* reader, Sink* writer) {
+size_t Compress(Source* reader, Sink* writer, CompressionOptions options) {
+ assert(options.level == 1 || options.level == 2);
+ int token = 0;
size_t written = 0;
size_t N = reader->Available();
const size_t uncompressed_size = N;
@@ -1510,17 +1841,23 @@ size_t Compress(Source* reader, Sink* writer) {
uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
// Compress input_fragment and append to dest
- const int max_output = MaxCompressedLength(num_to_read);
-
- // Need a scratch buffer for the output, in case the byte sink doesn't
- // have room for us directly.
+ int max_output = MaxCompressedLength(num_to_read);
// Since we encode kBlockSize regions followed by a region
// which is <= kBlockSize in length, a previously allocated
// scratch_output[] region is big enough for this iteration.
+ // Need a scratch buffer for the output, in case the byte sink doesn't
+ // have room for us directly.
char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
- char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
- table_size);
+ char* end = nullptr;
+ if (options.level == 1) {
+ end = internal::CompressFragment(fragment, fragment_size, dest, table,
+ table_size);
+ } else if (options.level == 2) {
+ end = internal::CompressFragmentDoubleHash(
+ fragment, fragment_size, dest, table, table_size >> 1,
+ table + (table_size >> 1), table_size >> 1);
+ }
writer->Append(dest, end - dest);
written += (end - dest);
@@ -1528,8 +1865,7 @@ size_t Compress(Source* reader, Sink* writer) {
reader->Skip(pending_advance);
}
- Report("snappy_compress", written, uncompressed_size);
-
+ Report(token, "snappy_compress", written, uncompressed_size);
return written;
}
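
A usage sketch for the Source/Sink entry point shown above, assuming the snappy 1.2.0 headers (snappy.h and snappy-sinksource.h); PackWithSinks is a hypothetical helper. Level 2 selects the double-hash fragment compressor, level 1 keeps the historical path:

    #include <string>
    #include "snappy-sinksource.h"
    #include "snappy.h"

    size_t PackWithSinks(const std::string& input, std::string* out) {
      // UncheckedByteArraySink does not bounds-check, so size the buffer to the
      // worst case up front.
      out->resize(snappy::MaxCompressedLength(input.size()));
      snappy::ByteArraySource source(input.data(), input.size());
      snappy::UncheckedByteArraySink sink(&(*out)[0]);
      size_t written = snappy::Compress(
          &source, &sink, snappy::CompressionOptions{/*compression_level=*/2});
      out->resize(written);
      return written;
    }
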
@@ -1537,6 +1873,67 @@ size_t Compress(Source* reader, Sink* writer) {
// IOVec interfaces
// -----------------------------------------------------------------------
+// A `Source` implementation that yields the contents of an `iovec` array. Note
+// that `total_size` is the total number of bytes to be read from the elements
+// of `iov` (_not_ the total number of elements in `iov`).
+class SnappyIOVecReader : public Source {
+ public:
+ SnappyIOVecReader(const struct iovec* iov, size_t total_size)
+ : curr_iov_(iov),
+ curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base)
+ : nullptr),
+ curr_size_remaining_(total_size > 0 ? iov->iov_len : 0),
+ total_size_remaining_(total_size) {
+ // Skip empty leading `iovec`s.
+ if (total_size > 0 && curr_size_remaining_ == 0) Advance();
+ }
+
+ ~SnappyIOVecReader() override = default;
+
+ size_t Available() const override { return total_size_remaining_; }
+
+ const char* Peek(size_t* len) override {
+ *len = curr_size_remaining_;
+ return curr_pos_;
+ }
+
+ void Skip(size_t n) override {
+ while (n >= curr_size_remaining_ && n > 0) {
+ n -= curr_size_remaining_;
+ Advance();
+ }
+ curr_size_remaining_ -= n;
+ total_size_remaining_ -= n;
+ curr_pos_ += n;
+ }
+
+ private:
+ // Advances to the next nonempty `iovec` and updates related variables.
+ void Advance() {
+ do {
+ assert(total_size_remaining_ >= curr_size_remaining_);
+ total_size_remaining_ -= curr_size_remaining_;
+ if (total_size_remaining_ == 0) {
+ curr_pos_ = nullptr;
+ curr_size_remaining_ = 0;
+ return;
+ }
+ ++curr_iov_;
+ curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base);
+ curr_size_remaining_ = curr_iov_->iov_len;
+ } while (curr_size_remaining_ == 0);
+ }
+
+ // The `iovec` currently being read.
+ const struct iovec* curr_iov_;
+ // The location in `curr_iov_` currently being read.
+ const char* curr_pos_;
+ // The amount of unread data in `curr_iov_`.
+ size_t curr_size_remaining_;
+ // The amount of unread data in the entire input array.
+ size_t total_size_remaining_;
+};
+
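
The class comment is worth underlining with a caller-side sketch: the reader, and the RawCompressFromIOVec entry point that later hunks build on top of it, is handed the byte total of the array, not the element count. PackIOVec is a hypothetical helper, assuming a POSIX struct iovec:

    #include <sys/uio.h>

    #include <string>

    #include "snappy.h"

    std::string PackIOVec(const struct iovec* iov, size_t iov_cnt) {
      size_t total = 0;
      for (size_t i = 0; i < iov_cnt; ++i) total += iov[i].iov_len;  // bytes, not count
      std::string out(snappy::MaxCompressedLength(total), '\0');
      size_t packed_len = 0;
      snappy::RawCompressFromIOVec(iov, total, &out[0], &packed_len);
      out.resize(packed_len);
      return out;
    }
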
// A type that writes to an iovec.
// Note that this is not a "ByteSink", but a type that matches the
// Writer template argument to SnappyDecompressor::DecompressAllTags().
@@ -1902,24 +2299,54 @@ bool IsValidCompressed(Source* compressed) {
}
void RawCompress(const char* input, size_t input_length, char* compressed,
- size_t* compressed_length) {
+ size_t* compressed_length, CompressionOptions options) {
ByteArraySource reader(input, input_length);
UncheckedByteArraySink writer(compressed);
- Compress(&reader, &writer);
+ Compress(&reader, &writer, options);
// Compute how many bytes were added
*compressed_length = (writer.CurrentDestination() - compressed);
}
-size_t Compress(const char* input, size_t input_length,
- std::string* compressed) {
+void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
+ char* compressed, size_t* compressed_length,
+ CompressionOptions options) {
+ SnappyIOVecReader reader(iov, uncompressed_length);
+ UncheckedByteArraySink writer(compressed);
+ Compress(&reader, &writer, options);
+
+ // Compute how many bytes were added.
+ *compressed_length = writer.CurrentDestination() - compressed;
+}
+
+size_t Compress(const char* input, size_t input_length, std::string* compressed,
+ CompressionOptions options) {
// Pre-grow the buffer to the max length of the compressed output
STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
size_t compressed_length;
RawCompress(input, input_length, string_as_array(compressed),
- &compressed_length);
- compressed->resize(compressed_length);
+ &compressed_length, options);
+ compressed->erase(compressed_length);
+ return compressed_length;
+}
+
+size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
+ std::string* compressed, CompressionOptions options) {
+ // Compute the number of bytes to be compressed.
+ size_t uncompressed_length = 0;
+ for (size_t i = 0; i < iov_cnt; ++i) {
+ uncompressed_length += iov[i].iov_len;
+ }
+
+ // Pre-grow the buffer to the max length of the compressed output.
+ STLStringResizeUninitialized(compressed, MaxCompressedLength(
+ uncompressed_length));
+
+ size_t compressed_length;
+ RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed),
+ &compressed_length, options);
+ compressed->erase(compressed_length);
return compressed_length;
}
@@ -2108,7 +2535,6 @@ bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
class SnappySinkAllocator {
public:
explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
- ~SnappySinkAllocator() {}
char* Allocate(int size) {
Datablock block(new char[size], size);
diff --git a/other-licenses/snappy/src/snappy.h b/other-licenses/snappy/src/snappy.h
index e4fdad3354..9e0572dcc8 100644
--- a/other-licenses/snappy/src/snappy.h
+++ b/other-licenses/snappy/src/snappy.h
@@ -50,13 +50,36 @@ namespace snappy {
class Source;
class Sink;
+ struct CompressionOptions {
+ // Compression level.
+ // Level 1 is the fastest
+ // Level 2 is a little slower but provides better compression. Level 2 is
+ // **EXPERIMENTAL** for the time being. It might happen that we decide to
+ // fall back to level 1 in the future.
+ // Levels 3+ are currently not supported. We plan to support levels up to
+ // 9 in the future.
+ // If you played with other compression algorithms, level 1 is equivalent to
+ // fast mode (level 1) of LZ4, level 2 is equivalent to LZ4's level 2 mode
+ // and compresses somewhere around zstd:-3 and zstd:-2 but generally with
+ // faster decompression speeds than snappy:1 and zstd:-3.
+ int level = DefaultCompressionLevel();
+
+ constexpr CompressionOptions() = default;
+ constexpr explicit CompressionOptions(int compression_level)
+ : level(compression_level) {}
+ static constexpr int MinCompressionLevel() { return 1; }
+ static constexpr int MaxCompressionLevel() { return 2; }
+ static constexpr int DefaultCompressionLevel() { return 1; }
+ };
+
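
Because the single-argument constructor above is declared explicit, an options object has to be spelled out at call sites; a bare integer no longer converts implicitly. A small sketch under that assumption (Example is a hypothetical caller):

    #include <string>
    #include "snappy.h"

    void Example(const std::string& input, std::string* out) {
      snappy::CompressionOptions dense(2);  // direct-initialization is fine
      snappy::Compress(input.data(), input.size(), out, dense);
      // Copy-initialization from a bare int would not compile:
      //   snappy::CompressionOptions bad = 2;  // error: constructor is explicit
    }
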
// ------------------------------------------------------------------------
// Generic compression/decompression routines.
// ------------------------------------------------------------------------
- // Compress the bytes read from "*source" and append to "*sink". Return the
+ // Compress the bytes read from "*reader" and append to "*writer". Return the
// number of bytes written.
- size_t Compress(Source* source, Sink* sink);
+ size_t Compress(Source* reader, Sink* writer,
+ CompressionOptions options = {});
// Find the uncompressed length of the given stream, as given by the header.
// Note that the true length could deviate from this; the stream could e.g.
@@ -71,14 +94,22 @@ namespace snappy {
// Higher-level string based routines (should be sufficient for most users)
// ------------------------------------------------------------------------
- // Sets "*compressed" to the compressed version of "input[0,input_length-1]".
+ // Sets "*compressed" to the compressed version of "input[0..input_length-1]".
// Original contents of *compressed are lost.
//
// REQUIRES: "input[]" is not an alias of "*compressed".
size_t Compress(const char* input, size_t input_length,
- std::string* compressed);
+ std::string* compressed, CompressionOptions options = {});
+
+ // Same as `Compress` above but taking an `iovec` array as input. Note that
+ // this function preprocesses the inputs to compute the sum of
+ // `iov[0..iov_cnt-1].iov_len` before reading. To avoid this, use
+ // `RawCompressFromIOVec` below.
+ size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
+ std::string* compressed,
+ CompressionOptions options = {});
- // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+ // Decompresses "compressed[0..compressed_length-1]" to "*uncompressed".
// Original contents of "*uncompressed" are lost.
//
// REQUIRES: "compressed[]" is not an alias of "*uncompressed".
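
A short usage sketch for CompressFromIOVec (PackTwoBuffers is hypothetical): unlike the raw variant further below, this overload takes the number of iovec elements and sums their lengths itself before compressing.

    #include <sys/uio.h>

    #include <string>

    #include "snappy.h"

    std::string PackTwoBuffers(const std::string& a, const std::string& b) {
      struct iovec iov[2];
      iov[0].iov_base = const_cast<char*>(a.data());
      iov[0].iov_len = a.size();
      iov[1].iov_base = const_cast<char*>(b.data());
      iov[1].iov_len = b.size();
      std::string packed;
      snappy::CompressFromIOVec(iov, /*iov_cnt=*/2, &packed);
      return packed;
    }
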
@@ -119,10 +150,15 @@ namespace snappy {
// RawCompress(input, input_length, output, &output_length);
// ... Process(output, output_length) ...
// delete [] output;
- void RawCompress(const char* input,
- size_t input_length,
- char* compressed,
- size_t* compressed_length);
+ void RawCompress(const char* input, size_t input_length, char* compressed,
+ size_t* compressed_length, CompressionOptions options = {});
+
+ // Same as `RawCompress` above but taking an `iovec` array as input. Note that
+ // `uncompressed_length` is the total number of bytes to be read from the
+ // elements of `iov` (_not_ the number of elements in `iov`).
+ void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
+ char* compressed, size_t* compressed_length,
+ CompressionOptions options = {});
// Given data in "compressed[0..compressed_length-1]" generated by
// calling the Snappy::Compress routine, this routine
@@ -202,7 +238,7 @@ namespace snappy {
static constexpr int kMinHashTableBits = 8;
static constexpr size_t kMinHashTableSize = 1 << kMinHashTableBits;
- static constexpr int kMaxHashTableBits = 14;
+ static constexpr int kMaxHashTableBits = 15;
static constexpr size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
} // end namespace snappy
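
The kMaxHashTableBits bump from 14 to 15 doubles the largest hash table the compressor may allocate; with the uint16_t entries seen in the snappy.cc hunks above, that is 64 KiB of working memory instead of 32 KiB. A back-of-the-envelope check:

    #include <cstdint>

    static_assert((1u << 14) * sizeof(uint16_t) == 32u * 1024, "old maximum: 32 KiB");
    static_assert((1u << 15) * sizeof(uint16_t) == 64u * 1024, "new maximum: 64 KiB");
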
diff --git a/other-licenses/snappy/src/snappy_compress_fuzzer.cc b/other-licenses/snappy/src/snappy_compress_fuzzer.cc
index 1d4429a8c1..93254a28b1 100644
--- a/other-licenses/snappy/src/snappy_compress_fuzzer.cc
+++ b/other-licenses/snappy/src/snappy_compress_fuzzer.cc
@@ -39,22 +39,26 @@
// Entry point for LibFuzzer.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
std::string input(reinterpret_cast<const char*>(data), size);
+ for (int level = snappy::CompressionOptions::MinCompressionLevel();
+ level <= snappy::CompressionOptions::MaxCompressionLevel(); ++level) {
+ std::string compressed;
+ size_t compressed_size =
+ snappy::Compress(input.data(), input.size(), &compressed,
+ snappy::CompressionOptions{/*level=*/level});
- std::string compressed;
- size_t compressed_size =
- snappy::Compress(input.data(), input.size(), &compressed);
+ (void)compressed_size; // Variable only used in debug builds.
+ assert(compressed_size == compressed.size());
+ assert(compressed.size() <= snappy::MaxCompressedLength(input.size()));
+ assert(
+ snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
- (void)compressed_size; // Variable only used in debug builds.
- assert(compressed_size == compressed.size());
- assert(compressed.size() <= snappy::MaxCompressedLength(input.size()));
- assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ std::string uncompressed_after_compress;
+ bool uncompress_succeeded = snappy::Uncompress(
+ compressed.data(), compressed.size(), &uncompressed_after_compress);
- std::string uncompressed_after_compress;
- bool uncompress_succeeded = snappy::Uncompress(
- compressed.data(), compressed.size(), &uncompressed_after_compress);
-
- (void)uncompress_succeeded; // Variable only used in debug builds.
- assert(uncompress_succeeded);
- assert(input == uncompressed_after_compress);
+ (void)uncompress_succeeded; // Variable only used in debug builds.
+ assert(uncompress_succeeded);
+ assert(input == uncompressed_after_compress);
+ }
return 0;
}
diff --git a/other-licenses/snappy/src/snappy_unittest.cc b/other-licenses/snappy/src/snappy_unittest.cc
index 7a85635d73..e57b13d0e8 100644
--- a/other-licenses/snappy/src/snappy_unittest.cc
+++ b/other-licenses/snappy/src/snappy_unittest.cc
@@ -50,7 +50,7 @@ namespace snappy {
namespace {
-#if defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+#if HAVE_FUNC_MMAP && HAVE_FUNC_SYSCONF
// To test against code that reads beyond its input, this class copies a
// string to a newly allocated group of pages, the last of which
@@ -96,7 +96,7 @@ class DataEndingAtUnreadablePage {
size_t size_;
};
-#else // defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF)
+#else // HAVE_FUNC_MMAP && HAVE_FUNC_SYSCONF
// Fallback for systems without mmap.
using DataEndingAtUnreadablePage = std::string;
@@ -137,21 +137,10 @@ void VerifyStringSink(const std::string& input) {
CHECK_EQ(uncompressed, input);
}
-void VerifyIOVec(const std::string& input) {
- std::string compressed;
- DataEndingAtUnreadablePage i(input);
- const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
- CHECK_EQ(written, compressed.size());
- CHECK_LE(compressed.size(),
- snappy::MaxCompressedLength(input.size()));
- CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-
- // Try uncompressing into an iovec containing a random number of entries
- // ranging from 1 to 10.
- char* buf = new char[input.size()];
+struct iovec* GetIOVec(const std::string& input, char*& buf, size_t& num) {
std::minstd_rand0 rng(input.size());
std::uniform_int_distribution<size_t> uniform_1_to_10(1, 10);
- size_t num = uniform_1_to_10(rng);
+ num = uniform_1_to_10(rng);
if (input.size() < num) {
num = input.size();
}
@@ -175,8 +164,40 @@ void VerifyIOVec(const std::string& input) {
}
used_so_far += iov[i].iov_len;
}
- CHECK(snappy::RawUncompressToIOVec(
- compressed.data(), compressed.size(), iov, num));
+ return iov;
+}
+
+int VerifyIOVecSource(const std::string& input) {
+ std::string compressed;
+ std::string copy = input;
+ char* buf = const_cast<char*>(copy.data());
+ size_t num = 0;
+ struct iovec* iov = GetIOVec(input, buf, num);
+ const size_t written = snappy::CompressFromIOVec(iov, num, &compressed);
+ CHECK_EQ(written, compressed.size());
+ CHECK_LE(compressed.size(), snappy::MaxCompressedLength(input.size()));
+ CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+ std::string uncompressed;
+ DataEndingAtUnreadablePage c(compressed);
+ CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+ CHECK_EQ(uncompressed, input);
+ delete[] iov;
+ return uncompressed.size();
+}
+
+void VerifyIOVecSink(const std::string& input) {
+ std::string compressed;
+ DataEndingAtUnreadablePage i(input);
+ const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+ CHECK_EQ(written, compressed.size());
+ CHECK_LE(compressed.size(), snappy::MaxCompressedLength(input.size()));
+ CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ char* buf = new char[input.size()];
+ size_t num = 0;
+ struct iovec* iov = GetIOVec(input, buf, num);
+ CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(), iov,
+ num));
CHECK(!memcmp(buf, input.data(), input.size()));
delete[] iov;
delete[] buf;
@@ -252,15 +273,18 @@ int Verify(const std::string& input) {
// Compress using string based routines
const int result = VerifyString(input);
+ // Compress using `iovec`-based routines.
+ CHECK_EQ(VerifyIOVecSource(input), result);
+
// Verify using sink based routines
VerifyStringSink(input);
VerifyNonBlockedCompression(input);
- VerifyIOVec(input);
+ VerifyIOVecSink(input);
if (!input.empty()) {
const std::string expanded = Expand(input);
VerifyNonBlockedCompression(expanded);
- VerifyIOVec(input);
+ VerifyIOVecSink(input);
}
return result;
@@ -540,7 +564,27 @@ TEST(Snappy, FourByteOffset) {
CHECK_EQ(uncompressed, src);
}
-TEST(Snappy, IOVecEdgeCases) {
+TEST(Snappy, IOVecSourceEdgeCases) {
+ // Validate that empty leading, trailing, and in-between iovecs are handled:
+ // [] [] ['a'] [] ['b'] [].
+ std::string data = "ab";
+ char* buf = const_cast<char*>(data.data());
+ size_t used_so_far = 0;
+ static const int kLengths[] = {0, 0, 1, 0, 1, 0};
+ struct iovec iov[ARRAYSIZE(kLengths)];
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ iov[i].iov_base = buf + used_so_far;
+ iov[i].iov_len = kLengths[i];
+ used_so_far += kLengths[i];
+ }
+ std::string compressed;
+ snappy::CompressFromIOVec(iov, ARRAYSIZE(kLengths), &compressed);
+ std::string uncompressed;
+ snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed);
+ CHECK_EQ(data, uncompressed);
+}
+
+TEST(Snappy, IOVecSinkEdgeCases) {
// Test some tricky edge cases in the iovec output that are not necessarily
// exercised by random tests.
@@ -905,7 +949,7 @@ TEST(Snappy, VerifyCharTable) {
// COPY_1_BYTE_OFFSET.
//
// The tag byte in the compressed data stores len-4 in 3 bits, and
- // offset/256 in 5 bits. offset%256 is stored in the next byte.
+ // offset/256 in 3 bits. offset%256 is stored in the next byte.
//
// This format is used for length in range [4..11] and offset in
// range [0..2047]
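
A worked example of that layout (illustrative only): a copy of length 7 at offset 777 fits COPY_1_BYTE_OFFSET, since 4 <= 7 <= 11 and 777 < 2048; the tag byte packs len-4 and offset/256, and offset%256 goes in the following byte.

    #include <cassert>
    #include <cstdint>

    int main() {
      const int len = 7, offset = 777;
      const uint8_t tag = 0x01 | ((len - 4) << 2) | ((offset >> 8) << 5);
      const uint8_t next_byte = offset & 0xFF;
      assert((tag & 3) == 1);                             // COPY_1_BYTE_OFFSET
      assert(((tag >> 2) & 7) + 4 == len);                // 3 bits of length
      assert((((tag >> 5) << 8) | next_byte) == offset);  // 3 + 8 bits of offset
      return 0;
    }
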