Diffstat (limited to 'third_party/rust/ahash/src')
-rw-r--r--  third_party/rust/ahash/src/aes_hash.rs           293
-rw-r--r--  third_party/rust/ahash/src/convert.rs            172
-rw-r--r--  third_party/rust/ahash/src/fallback_hash.rs      223
-rw-r--r--  third_party/rust/ahash/src/hash_map.rs           177
-rw-r--r--  third_party/rust/ahash/src/hash_quality_test.rs  451
-rw-r--r--  third_party/rust/ahash/src/hash_set.rs           267
-rw-r--r--  third_party/rust/ahash/src/lib.rs                203
-rw-r--r--  third_party/rust/ahash/src/operations.rs         277
-rw-r--r--  third_party/rust/ahash/src/random_state.rs       153
-rw-r--r--  third_party/rust/ahash/src/specialize.rs         162
10 files changed, 2378 insertions, 0 deletions
diff --git a/third_party/rust/ahash/src/aes_hash.rs b/third_party/rust/ahash/src/aes_hash.rs
new file mode 100644
index 0000000000..2593deabdf
--- /dev/null
+++ b/third_party/rust/ahash/src/aes_hash.rs
@@ -0,0 +1,293 @@
+use crate::convert::*;
+use crate::operations::*;
+#[cfg(feature = "specialize")]
+use crate::HasherExt;
+use core::hash::Hasher;
+
+/// A `Hasher` for hashing an arbitrary stream of bytes.
+///
+/// Instances of [`AHasher`] represent state that is updated while hashing data.
+///
+/// Each method updates the internal state based on the new data provided. Once
+/// all of the data has been provided, the resulting hash can be obtained by calling
+/// `finish()`.
+///
+/// [Clone] is also provided in case you wish to calculate hashes for two different items that
+/// start with the same data.
+///
+#[derive(Debug, Clone)]
+pub struct AHasher {
+ enc: u128,
+ sum: u128,
+ key: u128,
+}
+
+impl AHasher {
+ /// Creates a new hasher keyed to the provided keys.
+ ///
+    /// Normally hashers are created via `AHasher::default()` for fixed keys, `RandomState::new()` for randomly
+    /// generated keys, or `RandomState::with_seeds(a, b)` for explicitly set seeds that can be reused. All of these
+    /// work at map creation time (and hence don't add any overhead on a per-item basis).
+ ///
+ /// This method directly creates the hasher instance and performs no transformation on the provided seeds. This may
+ /// be useful where a HashBuilder is not desired, such as for testing purposes.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::hash::Hasher;
+ /// use ahash::AHasher;
+ ///
+ /// let mut hasher = AHasher::new_with_keys(1234, 5678);
+ ///
+ /// hasher.write_u32(1989);
+ /// hasher.write_u8(11);
+ /// hasher.write_u8(9);
+ /// hasher.write(b"Huh?");
+ ///
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ #[inline]
+ pub fn new_with_keys(key1: u128, key2: u128) -> Self {
+ Self {
+ enc: key1,
+ sum: key2,
+ key: key1 ^ key2,
+ }
+ }
+
+ #[cfg(test)]
+ pub(crate) fn test_with_keys(key1: u64, key2: u64) -> AHasher {
+ use crate::random_state::scramble_keys;
+ let (k1, k2, k3, k4) = scramble_keys(key1, key2);
+ AHasher {
+ enc: [k1, k2].convert(),
+ sum: [k3, k4].convert(),
+ key: add_by_64s([k1, k2], [k3, k4]).convert(),
+ }
+ }
+
+ #[inline(always)]
+ fn add_in_length(&mut self, length: u64) {
+ //This will be scrambled by the next AES round.
+ let mut enc: [u64; 2] = self.enc.convert();
+ enc[0] = enc[0].wrapping_add(length);
+ self.enc = enc.convert();
+ }
+
+ #[inline(always)]
+ fn hash_in(&mut self, new_value: u128) {
+ self.enc = aesenc(self.enc, new_value);
+ self.sum = shuffle_and_add(self.sum, new_value);
+ }
+
+ #[inline(always)]
+ fn hash_in_2(&mut self, v1: u128, v2: u128) {
+ self.enc = aesenc(self.enc, v1);
+ self.sum = shuffle_and_add(self.sum, v1);
+ self.enc = aesenc(self.enc, v2);
+ self.sum = shuffle_and_add(self.sum, v2);
+ }
+}
+
+#[cfg(feature = "specialize")]
+impl HasherExt for AHasher {
+ #[inline]
+ fn hash_u64(self, value: u64) -> u64 {
+ let mask = self.sum as u64;
+ let rot = (self.enc & 64) as u32;
+ (value ^ mask)
+ .folded_multiply(crate::random_state::MULTIPLE)
+ .rotate_left(rot)
+ }
+
+ #[inline]
+ fn short_finish(&self) -> u64 {
+ let buffer: [u64; 2] = self.enc.convert();
+ buffer[0].folded_multiply(buffer[1])
+ }
+}
+
+/// Provides methods to hash all of the primitive types.
+impl Hasher for AHasher {
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.write_u64(i as u64);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.write_u64(i as u64);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.write_u64(i as u64);
+ }
+
+ #[inline]
+ fn write_u128(&mut self, i: u128) {
+ self.hash_in(i);
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.write_u64(i as u64);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.write_u128(i as u128);
+ }
+
+ #[inline]
+ #[allow(clippy::collapsible_if)]
+ fn write(&mut self, input: &[u8]) {
+ let mut data = input;
+ let length = data.len();
+ self.add_in_length(length as u64);
+ //A 'binary search' on sizes reduces the number of comparisons.
+ if data.len() < 8 {
+ let value: [u64; 2] = if data.len() >= 2 {
+ if data.len() >= 4 {
+ //len 4-8
+ [data.read_u32().0 as u64, data.read_last_u32() as u64]
+ } else {
+ //len 2-3
+ [data.read_u16().0 as u64, data[data.len() - 1] as u64]
+ }
+ } else {
+ if data.len() > 0 {
+ [data[0] as u64, 0]
+ } else {
+ [0, 0]
+ }
+ };
+ self.hash_in(value.convert());
+ } else {
+ if data.len() > 32 {
+ if data.len() > 64 {
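+                    //Mix in the final 64 bytes first, so lengths that are not a
+                    //multiple of 64 need no special casing; the loop below then
+                    //consumes 64-byte blocks through four independent AES lanes
+                    //(plus two shuffle/add sum lanes) for instruction-level parallelism.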
+ let tail = data.read_last_u128x4();
+ let mut current: [u128; 4] = [self.key; 4];
+ current[0] = aesenc(current[0], tail[0]);
+ current[1] = aesenc(current[1], tail[1]);
+ current[2] = aesenc(current[2], tail[2]);
+ current[3] = aesenc(current[3], tail[3]);
+ let mut sum: [u128; 2] = [self.key, self.key];
+ sum[0] = add_by_64s(sum[0].convert(), tail[0].convert()).convert();
+ sum[1] = add_by_64s(sum[1].convert(), tail[1].convert()).convert();
+ sum[0] = shuffle_and_add(sum[0], tail[2]);
+ sum[1] = shuffle_and_add(sum[1], tail[3]);
+ while data.len() > 64 {
+ let (blocks, rest) = data.read_u128x4();
+ current[0] = aesenc(current[0], blocks[0]);
+ current[1] = aesenc(current[1], blocks[1]);
+ current[2] = aesenc(current[2], blocks[2]);
+ current[3] = aesenc(current[3], blocks[3]);
+ sum[0] = shuffle_and_add(sum[0], blocks[0]);
+ sum[1] = shuffle_and_add(sum[1], blocks[1]);
+ sum[0] = shuffle_and_add(sum[0], blocks[2]);
+ sum[1] = shuffle_and_add(sum[1], blocks[3]);
+ data = rest;
+ }
+ self.hash_in_2(aesenc(current[0], current[1]), aesenc(current[2], current[3]));
+ self.hash_in(add_by_64s(sum[0].convert(), sum[1].convert()).convert());
+ } else {
+ //len 33-64
+ let (head, _) = data.read_u128x2();
+ let tail = data.read_last_u128x2();
+ self.hash_in_2(head[0], head[1]);
+ self.hash_in_2(tail[0], tail[1]);
+ }
+ } else {
+ if data.len() > 16 {
+ //len 17-32
+ self.hash_in_2(data.read_u128().0, data.read_last_u128());
+ } else {
+ //len 9-16
+ let value: [u64; 2] = [data.read_u64().0, data.read_last_u64()];
+ self.hash_in(value.convert());
+ }
+ }
+ }
+ }
+ #[inline]
+ fn finish(&self) -> u64 {
+ let combined = aesdec(self.sum, self.enc);
+ let result: [u64; 2] = aesenc(aesenc(combined, self.key), combined).convert();
+ result[0]
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::convert::Convert;
+ use crate::operations::aesenc;
+ use crate::RandomState;
+ use std::hash::{BuildHasher, Hasher};
+ #[test]
+ fn test_sanity() {
+ let mut hasher = RandomState::with_seeds(192837465, 1234567890).build_hasher();
+ hasher.write_u64(0);
+ let h1 = hasher.finish();
+ hasher.write(&[1, 0, 0, 0, 0, 0, 0, 0]);
+ let h2 = hasher.finish();
+ assert_ne!(h1, h2);
+ }
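+
+    // Illustrative sanity check (not from upstream): `add_in_length` mixes the
+    // input length into the state, so a shared prefix with different lengths
+    // must not collide.
+    #[test]
+    fn test_length_affects_output() {
+        let build_hasher = RandomState::with_seeds(192837465, 1234567890);
+        let mut short = build_hasher.build_hasher();
+        short.write(b"abcd");
+        let mut long = build_hasher.build_hasher();
+        long.write(b"abcde");
+        assert_ne!(short.finish(), long.finish());
+    }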
+
+ #[cfg(feature = "compile-time-rng")]
+ #[test]
+ fn test_builder() {
+ use std::collections::HashMap;
+ use std::hash::BuildHasherDefault;
+
+ let mut map = HashMap::<u32, u64, BuildHasherDefault<AHasher>>::default();
+ map.insert(1, 3);
+ }
+
+ #[cfg(feature = "compile-time-rng")]
+ #[test]
+ fn test_default() {
+ let hasher_a = AHasher::default();
+ let a_enc: [u64; 2] = hasher_a.enc.convert();
+ let a_sum: [u64; 2] = hasher_a.sum.convert();
+ assert_ne!(0, a_enc[0]);
+ assert_ne!(0, a_enc[1]);
+ assert_ne!(0, a_sum[0]);
+ assert_ne!(0, a_sum[1]);
+ assert_ne!(a_enc[0], a_enc[1]);
+ assert_ne!(a_sum[0], a_sum[1]);
+ assert_ne!(a_enc[0], a_sum[0]);
+ assert_ne!(a_enc[1], a_sum[1]);
+ let hasher_b = AHasher::default();
+ let b_enc: [u64; 2] = hasher_b.enc.convert();
+ let b_sum: [u64; 2] = hasher_b.sum.convert();
+ assert_eq!(a_enc[0], b_enc[0]);
+ assert_eq!(a_enc[1], b_enc[1]);
+ assert_eq!(a_sum[0], b_sum[0]);
+ assert_eq!(a_sum[1], b_sum[1]);
+ }
+
+ #[test]
+ fn test_hash() {
+ let mut result: [u64; 2] = [0x6c62272e07bb0142, 0x62b821756295c58d];
+ let value: [u64; 2] = [1 << 32, 0xFEDCBA9876543210];
+ result = aesenc(value.convert(), result.convert()).convert();
+ result = aesenc(result.convert(), result.convert()).convert();
+ let mut result2: [u64; 2] = [0x6c62272e07bb0142, 0x62b821756295c58d];
+ let value2: [u64; 2] = [1, 0xFEDCBA9876543210];
+ result2 = aesenc(value2.convert(), result2.convert()).convert();
+ result2 = aesenc(result2.convert(), result.convert()).convert();
+ let result: [u8; 16] = result.convert();
+ let result2: [u8; 16] = result2.convert();
+ assert_ne!(hex::encode(result), hex::encode(result2));
+ }
+
+ #[test]
+ fn test_conversion() {
+ let input: &[u8] = "dddddddd".as_bytes();
+ let bytes: u64 = as_array!(input, 8).convert();
+ assert_eq!(bytes, 0x6464646464646464);
+ }
+}
diff --git a/third_party/rust/ahash/src/convert.rs b/third_party/rust/ahash/src/convert.rs
new file mode 100644
index 0000000000..435c03c492
--- /dev/null
+++ b/third_party/rust/ahash/src/convert.rs
@@ -0,0 +1,172 @@
+pub(crate) trait Convert<To> {
+ fn convert(self) -> To;
+}
+
+macro_rules! convert {
+ ($a:ty, $b:ty) => {
+ impl Convert<$b> for $a {
+ #[inline(always)]
+ fn convert(self) -> $b {
+ unsafe {
+ let mut result: $b = core::mem::zeroed();
+ core::ptr::copy_nonoverlapping(
+ &self as *const $a as *const u8,
+ &mut result as *mut $b as *mut u8,
+ core::mem::size_of::<$b>(),
+ );
+ return result;
+ }
+ }
+ }
+ impl Convert<$a> for $b {
+ #[inline(always)]
+ fn convert(self) -> $a {
+ unsafe {
+ let mut result: $a = core::mem::zeroed();
+ core::ptr::copy_nonoverlapping(
+ &self as *const $b as *const u8,
+ &mut result as *mut $a as *mut u8,
+ core::mem::size_of::<$a>(),
+ );
+ return result;
+ }
+ }
+ }
+ };
+}
+
+convert!([u128; 4], [u64; 8]);
+convert!([u128; 4], [u32; 16]);
+convert!([u128; 4], [u16; 32]);
+convert!([u128; 4], [u8; 64]);
+convert!([u128; 2], [u64; 4]);
+convert!([u128; 2], [u32; 8]);
+convert!([u128; 2], [u16; 16]);
+convert!([u128; 2], [u8; 32]);
+convert!(u128, [u64; 2]);
+convert!(u128, [u32; 4]);
+convert!(u128, [u16; 8]);
+convert!(u128, [u8; 16]);
+convert!([u64; 2], [u32; 4]);
+convert!([u64; 2], [u16; 8]);
+convert!([u64; 2], [u8; 16]);
+convert!([u32; 4], [u16; 8]);
+convert!([u32; 4], [u8; 16]);
+convert!([u16; 8], [u8; 16]);
+convert!(u64, [u32; 2]);
+convert!(u64, [u16; 4]);
+convert!(u64, [u8; 8]);
+convert!([u32; 2], [u16; 4]);
+convert!([u32; 2], [u8; 8]);
+convert!(u32, [u16; 2]);
+convert!(u32, [u8; 4]);
+convert!([u16; 2], [u8; 4]);
+convert!(u16, [u8; 2]);
+
+convert!([f64; 2], [u8; 16]);
+convert!([f32; 4], [u8; 16]);
+convert!(f64, [u8; 8]);
+convert!([f32; 2], [u8; 8]);
+convert!(f32, [u8; 4]);
+
+macro_rules! as_array {
+ ($input:expr, $len:expr) => {{
+ {
+ #[inline(always)]
+ fn as_array<T>(slice: &[T]) -> &[T; $len] {
+ assert_eq!(slice.len(), $len);
+ unsafe { &*(slice.as_ptr() as *const [_; $len]) }
+ }
+ as_array($input)
+ }
+ }};
+}
+
+pub(crate) trait ReadFromSlice {
+ fn read_u16(&self) -> (u16, &[u8]);
+ fn read_u32(&self) -> (u32, &[u8]);
+ fn read_u64(&self) -> (u64, &[u8]);
+ fn read_u128(&self) -> (u128, &[u8]);
+ fn read_u128x2(&self) -> ([u128; 2], &[u8]);
+ fn read_u128x4(&self) -> ([u128; 4], &[u8]);
+ fn read_last_u16(&self) -> u16;
+ fn read_last_u32(&self) -> u32;
+ fn read_last_u64(&self) -> u64;
+ fn read_last_u128(&self) -> u128;
+ fn read_last_u128x2(&self) -> [u128; 2];
+ fn read_last_u128x4(&self) -> [u128; 4];
+}
+
+impl ReadFromSlice for [u8] {
+ #[inline(always)]
+ fn read_u16(&self) -> (u16, &[u8]) {
+ let (value, rest) = self.split_at(2);
+ (as_array!(value, 2).convert(), rest)
+ }
+
+ #[inline(always)]
+ fn read_u32(&self) -> (u32, &[u8]) {
+ let (value, rest) = self.split_at(4);
+ (as_array!(value, 4).convert(), rest)
+ }
+
+ #[inline(always)]
+ fn read_u64(&self) -> (u64, &[u8]) {
+ let (value, rest) = self.split_at(8);
+ (as_array!(value, 8).convert(), rest)
+ }
+
+ #[inline(always)]
+ fn read_u128(&self) -> (u128, &[u8]) {
+ let (value, rest) = self.split_at(16);
+ (as_array!(value, 16).convert(), rest)
+ }
+
+ #[inline(always)]
+ fn read_u128x2(&self) -> ([u128; 2], &[u8]) {
+ let (value, rest) = self.split_at(32);
+ (as_array!(value, 32).convert(), rest)
+ }
+
+ #[inline(always)]
+ fn read_u128x4(&self) -> ([u128; 4], &[u8]) {
+ let (value, rest) = self.split_at(64);
+ (as_array!(value, 64).convert(), rest)
+ }
+
+ #[inline(always)]
+ fn read_last_u16(&self) -> u16 {
+ let (_, value) = self.split_at(self.len() - 2);
+ as_array!(value, 2).convert()
+ }
+
+ #[inline(always)]
+ fn read_last_u32(&self) -> u32 {
+ let (_, value) = self.split_at(self.len() - 4);
+ as_array!(value, 4).convert()
+ }
+
+ #[inline(always)]
+ fn read_last_u64(&self) -> u64 {
+ let (_, value) = self.split_at(self.len() - 8);
+ as_array!(value, 8).convert()
+ }
+
+ #[inline(always)]
+ fn read_last_u128(&self) -> u128 {
+ let (_, value) = self.split_at(self.len() - 16);
+ as_array!(value, 16).convert()
+ }
+
+ #[inline(always)]
+ fn read_last_u128x2(&self) -> [u128; 2] {
+ let (_, value) = self.split_at(self.len() - 32);
+ as_array!(value, 32).convert()
+ }
+
+ #[inline(always)]
+ fn read_last_u128x4(&self) -> [u128; 4] {
+ let (_, value) = self.split_at(self.len() - 64);
+ as_array!(value, 64).convert()
+ }
+}
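+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    // Illustrative check (not from upstream): `convert` is a plain byte copy,
+    // so the result matches `from_ne_bytes` regardless of endianness.
+    #[test]
+    fn test_read_from_slice() {
+        let data: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+        let (head, rest) = data.read_u16();
+        assert_eq!(head, u16::from_ne_bytes([1, 2]));
+        assert_eq!(rest.len(), 8);
+        assert_eq!(data.read_last_u32(), u32::from_ne_bytes([7, 8, 9, 10]));
+    }
+}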
diff --git a/third_party/rust/ahash/src/fallback_hash.rs b/third_party/rust/ahash/src/fallback_hash.rs
new file mode 100644
index 0000000000..6ba796e0e8
--- /dev/null
+++ b/third_party/rust/ahash/src/fallback_hash.rs
@@ -0,0 +1,223 @@
+use crate::convert::*;
+use crate::operations::folded_multiply;
+#[cfg(feature = "specialize")]
+use crate::HasherExt;
+use core::hash::Hasher;
+
+///This constant comes from Knuth's PRNG (empirically it works better than those from SplitMix32).
+const MULTIPLE: u64 = crate::random_state::MULTIPLE;
+const ROT: u32 = 23; //17
+
+/// A `Hasher` for hashing an arbitrary stream of bytes.
+///
+/// Instances of [`AHasher`] represent state that is updated while hashing data.
+///
+/// Each method updates the internal state based on the new data provided. Once
+/// all of the data has been provided, the resulting hash can be obtained by calling
+/// `finish()`.
+///
+/// [Clone] is also provided in case you wish to calculate hashes for two different items that
+/// start with the same data.
+///
+#[derive(Debug, Clone)]
+pub struct AHasher {
+ buffer: u64,
+ pad: u64,
+ extra_keys: [u64; 2],
+}
+
+impl AHasher {
+ /// Creates a new hasher keyed to the provided key.
+ #[inline]
+ #[allow(dead_code)] // Is not called if non-fallback hash is used.
+ pub fn new_with_keys(key1: u128, key2: u128) -> AHasher {
+ AHasher {
+ buffer: key1 as u64,
+ pad: key2 as u64,
+ extra_keys: (key1 ^ key2).convert(),
+ }
+ }
+
+ #[cfg(test)]
+ #[allow(dead_code)] // Is not called if non-fallback hash is used.
+ pub(crate) fn test_with_keys(key1: u64, key2: u64) -> AHasher {
+ use crate::random_state::scramble_keys;
+ let (k1, k2, k3, k4) = scramble_keys(key1, key2);
+ AHasher {
+ buffer: k1,
+ pad: k2,
+ extra_keys: [k3, k4],
+ }
+ }
+
+    /// This update function has the goal of updating the buffer with a single multiply.
+    /// FxHash does this but is vulnerable to attack. To avoid this, the input needs to be masked with an
+    /// unpredictable value. Other hashes such as MurmurHash have taken this approach but were found vulnerable
+    /// to attack. The attack was based on the idea of reversing the pre-mixing (which is necessarily
+    /// reversible, otherwise bits would be lost), then placing a difference in the highest bit before the
+    /// multiply used to mix the data. Because a multiply can never affect the bits to the right of it, a
+    /// subsequent update that also differed in this bit could result in a predictable collision.
+    ///
+    /// This version avoids this vulnerability while still only using a single multiply. It takes advantage
+    /// of the fact that when a 64-bit multiply is performed, the upper 64 bits are usually computed and thrown
+    /// away. Instead it creates two 128-bit values where the upper 64 bits are zeros and multiplies them.
+    /// (The compiler is smart enough to turn this into a 64-bit multiplication in the assembly.)
+    /// Then the upper bits are xored with the lower bits to produce a single 64-bit result.
+    ///
+    /// To understand why this is a good scrambling function it helps to understand multiply-with-carry PRNGs:
+    /// https://en.wikipedia.org/wiki/Multiply-with-carry_pseudorandom_number_generator
+    /// If the multiple is chosen well, this creates a long-period, decent-quality PRNG.
+    /// Notice that this function is equivalent to that, except the `buffer`/`state` is xored with each
+    /// new block of data. In the event that the data is all zeros, it is exactly equivalent to a MWC PRNG.
+    ///
+    /// This is impervious to attack because every bit of the buffer at the end is dependent on every bit in
+    /// `new_data ^ buffer`. For example, suppose two inputs differed in only the 5th bit. Then when the
+    /// multiplication is performed the `result` will differ in bits 5-69. More specifically it will differ by
+    /// 2^5 * MULTIPLE. However in the next step bits 65-128 are turned into a separate 64-bit value. So the
+    /// differing bits will be in the lower 6 bits of this value. The two intermediate values, which differ in
+    /// bits 5-63 and in bits 0-5 respectively, get added together, producing an output that differs in every
+    /// bit. The carries in the multiplication and in the final addition additionally mean that even if an
+    /// attacker somehow knew part of (but not all of) the contents of the buffer beforehand,
+    /// they would not be able to predict any of the bits in the buffer at the end.
+ #[inline(always)]
+ fn update(&mut self, new_data: u64) {
+ self.buffer = folded_multiply(new_data ^ self.buffer, MULTIPLE);
+ }
+
+    /// Similar to the above, this function performs an update using a "folded multiply".
+    /// However it takes in 128 bits of data instead of 64. Both halves must be masked.
+    ///
+    /// This makes it impossible for an attacker to place a single bit difference between
+    /// two blocks so as to cancel each other.
+    ///
+    /// However this is not sufficient: to prevent (a,b) from hashing the same as (b,a), the buffer itself must
+    /// be updated between calls in a way that does not commute. To achieve this, xor and rotate are used.
+    /// Add followed by xor is not the same as xor followed by add, and the rotate ensures that the same output
+    /// bits can't be changed by the same set of input bits. To cancel this sequence with subsequent input would
+    /// require knowing the keys.
+ #[inline(always)]
+ fn large_update(&mut self, new_data: u128) {
+ let block: [u64; 2] = new_data.convert();
+ let combined = folded_multiply(block[0] ^ self.extra_keys[0], block[1] ^ self.extra_keys[1]);
+ self.buffer = (self.pad.wrapping_add(combined) ^ self.buffer).rotate_left(ROT);
+ }
+}
+
+#[cfg(feature = "specialize")]
+impl HasherExt for AHasher {
+ #[inline]
+ fn hash_u64(self, value: u64) -> u64 {
+ let rot = (self.pad & 64) as u32;
+ folded_multiply(value ^ self.buffer, MULTIPLE).rotate_left(rot)
+ }
+
+ #[inline]
+ fn short_finish(&self) -> u64 {
+ self.buffer.wrapping_add(self.pad)
+ }
+}
+
+/// Provides methods to hash all of the primitive types.
+impl Hasher for AHasher {
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.update(i as u64);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.update(i as u64);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.update(i as u64);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.update(i as u64);
+ }
+
+ #[inline]
+ fn write_u128(&mut self, i: u128) {
+ let data: [u64; 2] = i.convert();
+ self.update(data[0]);
+ self.update(data[1]);
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.write_u64(i as u64);
+ }
+
+ #[inline]
+ #[allow(clippy::collapsible_if)]
+ fn write(&mut self, input: &[u8]) {
+ let mut data = input;
+ let length = data.len() as u64;
+ //Needs to be an add rather than an xor because otherwise it could be canceled with carefully formed input.
+ self.buffer = self.buffer.wrapping_add(length).wrapping_mul(MULTIPLE);
+ //A 'binary search' on sizes reduces the number of comparisons.
+ if data.len() > 8 {
+ if data.len() > 16 {
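+                //Mix in the final 16 bytes first, so lengths that are not a
+                //multiple of 16 need no special casing in the loop below.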
+ let tail = data.read_last_u128();
+ self.large_update(tail);
+ while data.len() > 16 {
+ let (block, rest) = data.read_u128();
+ self.large_update(block);
+ data = rest;
+ }
+ } else {
+ self.large_update([data.read_u64().0, data.read_last_u64()].convert());
+ }
+ } else {
+ if data.len() >= 2 {
+ if data.len() >= 4 {
+ let block = [data.read_u32().0 as u64, data.read_last_u32() as u64];
+ self.large_update(block.convert());
+ } else {
+ let value = [data.read_u16().0 as u32, data[data.len() - 1] as u32];
+ self.update(value.convert());
+ }
+ } else {
+ if data.len() > 0 {
+ self.update(data[0] as u64);
+ }
+ }
+ }
+ }
+ #[inline]
+ fn finish(&self) -> u64 {
+ let rot = (self.buffer & 63) as u32;
+ folded_multiply(self.buffer, self.pad).rotate_left(rot)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::convert::Convert;
+ use crate::fallback_hash::*;
+
+ #[test]
+ fn test_hash() {
+ let mut hasher = AHasher::new_with_keys(0, 0);
+ let value: u64 = 1 << 32;
+ hasher.update(value);
+ let result = hasher.buffer;
+ let mut hasher = AHasher::new_with_keys(0, 0);
+ let value2: u64 = 1;
+ hasher.update(value2);
+ let result2 = hasher.buffer;
+ let result: [u8; 8] = result.convert();
+ let result2: [u8; 8] = result2.convert();
+ assert_ne!(hex::encode(result), hex::encode(result2));
+ }
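+
+    // Illustrative check (not from upstream) of the "folded multiply" described
+    // above: the high half of the 128-bit product is xored into the low half,
+    // so the entropy of the upper bits is not thrown away.
+    #[test]
+    fn test_folded_multiply_folds_high_half() {
+        use crate::operations::folded_multiply;
+        let (a, b) = (0x2360_ed05_1fc6_5da4_u64, 0x4385_df64_9fcc_f645_u64);
+        let wide = (a as u128).wrapping_mul(b as u128);
+        let folded = (wide as u64) ^ ((wide >> 64) as u64);
+        assert_eq!(folded, folded_multiply(a, b));
+    }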
+
+ #[test]
+ fn test_conversion() {
+ let input: &[u8] = "dddddddd".as_bytes();
+ let bytes: u64 = as_array!(input, 8).convert();
+ assert_eq!(bytes, 0x6464646464646464);
+ }
+}
diff --git a/third_party/rust/ahash/src/hash_map.rs b/third_party/rust/ahash/src/hash_map.rs
new file mode 100644
index 0000000000..362ac8551a
--- /dev/null
+++ b/third_party/rust/ahash/src/hash_map.rs
@@ -0,0 +1,177 @@
+use std::borrow::Borrow;
+use std::collections::{hash_map, HashMap};
+use std::fmt::{self, Debug};
+use std::hash::{BuildHasher, Hash};
+use std::iter::FromIterator;
+use std::ops::{Deref, DerefMut, Index};
+use std::panic::UnwindSafe;
+
+/// A [`HashMap`](std::collections::HashMap) using [`RandomState`](crate::RandomState) to hash the items.
+/// Requires the `std` feature to be enabled.
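+///
+/// A usage sketch (illustrative, not from the upstream docs); the wrapper
+/// derefs to the inner `HashMap`, so the usual map API applies:
+/// ```
+/// use ahash::AHashMap;
+///
+/// let mut map: AHashMap<i32, &str> = AHashMap::new();
+/// map.insert(1, "one");
+/// assert_eq!(map.get(&1), Some(&"one"));
+/// ```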
+#[derive(Clone)]
+pub struct AHashMap<K, V, S = crate::RandomState>(HashMap<K, V, S>);
+
+impl<K, V, S> AHashMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher + Default,
+{
+ pub fn new() -> Self {
+ AHashMap(HashMap::with_hasher(S::default()))
+ }
+
+ pub fn with_capacity(capacity: usize) -> Self {
+ AHashMap(HashMap::with_capacity_and_hasher(capacity, S::default()))
+ }
+}
+
+impl<K, V, S> AHashMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ pub fn with_hasher(hash_builder: S) -> Self {
+ AHashMap(HashMap::with_hasher(hash_builder))
+ }
+
+ pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
+ AHashMap(HashMap::with_capacity_and_hasher(capacity, hash_builder))
+ }
+}
+
+impl<K, V, S> Deref for AHashMap<K, V, S> {
+ type Target = HashMap<K, V, S>;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<K, V, S> DerefMut for AHashMap<K, V, S> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl<K, V, S> UnwindSafe for AHashMap<K, V, S>
+where
+ K: UnwindSafe,
+ V: UnwindSafe,
+{
+}
+
+impl<K, V, S> PartialEq for AHashMap<K, V, S>
+where
+ K: Eq + Hash,
+ V: PartialEq,
+ S: BuildHasher,
+{
+ fn eq(&self, other: &AHashMap<K, V, S>) -> bool {
+ self.0.eq(&other.0)
+ }
+}
+
+impl<K, V, S> Eq for AHashMap<K, V, S>
+where
+ K: Eq + Hash,
+ V: Eq,
+ S: BuildHasher,
+{
+}
+
+impl<K, Q: ?Sized, V, S> Index<&Q> for AHashMap<K, V, S>
+where
+ K: Eq + Hash + Borrow<Q>,
+ Q: Eq + Hash,
+ S: BuildHasher,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `HashMap`.
+ #[inline]
+ fn index(&self, key: &Q) -> &V {
+ self.0.index(key)
+ }
+}
+
+impl<K, V, S> Debug for AHashMap<K, V, S>
+where
+ K: Eq + Hash + Debug,
+ V: Debug,
+ S: BuildHasher,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ self.0.fmt(fmt)
+ }
+}
+
+impl<K, V, S> FromIterator<(K, V)> for AHashMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher + Default,
+{
+ fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+ AHashMap(HashMap::from_iter(iter))
+ }
+}
+
+impl<'a, K, V, S> IntoIterator for &'a AHashMap<K, V, S> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = hash_map::Iter<'a, K, V>;
+ fn into_iter(self) -> Self::IntoIter {
+ (&self.0).iter()
+ }
+}
+
+impl<'a, K, V, S> IntoIterator for &'a mut AHashMap<K, V, S> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = hash_map::IterMut<'a, K, V>;
+ fn into_iter(self) -> Self::IntoIter {
+ (&mut self.0).iter_mut()
+ }
+}
+
+impl<K, V, S> IntoIterator for AHashMap<K, V, S> {
+ type Item = (K, V);
+ type IntoIter = hash_map::IntoIter<K, V>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.0.into_iter()
+ }
+}
+
+impl<K, V, S> Extend<(K, V)> for AHashMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+ self.0.extend(iter)
+ }
+}
+
+impl<'a, K, V, S> Extend<(&'a K, &'a V)> for AHashMap<K, V, S>
+where
+ K: Eq + Hash + Copy + 'a,
+ V: Copy + 'a,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
+ self.0.extend(iter)
+ }
+}
+
+impl<K, V, S> Default for AHashMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher + Default,
+{
+ #[inline]
+ fn default() -> AHashMap<K, V, S> {
+ AHashMap::with_hasher(Default::default())
+ }
+}
diff --git a/third_party/rust/ahash/src/hash_quality_test.rs b/third_party/rust/ahash/src/hash_quality_test.rs
new file mode 100644
index 0000000000..1a69562daa
--- /dev/null
+++ b/third_party/rust/ahash/src/hash_quality_test.rs
@@ -0,0 +1,451 @@
+use crate::{CallHasher, HasherExt};
+use core::hash::{Hash, Hasher};
+use std::collections::HashMap;
+
+fn assert_sufficiently_different(a: u64, b: u64, tolerance: i32) {
+ let (same_byte_count, same_nibble_count) = count_same_bytes_and_nibbles(a, b);
+ assert!(same_byte_count <= tolerance, "{:x} vs {:x}: {:}", a, b, same_byte_count);
+ assert!(
+ same_nibble_count <= tolerance * 3,
+ "{:x} vs {:x}: {:}",
+ a,
+ b,
+ same_nibble_count
+ );
+ let flipped_bits = (a ^ b).count_ones();
+ assert!(
+ flipped_bits > 12 && flipped_bits < 52,
+ "{:x} and {:x}: {:}",
+ a,
+ b,
+ flipped_bits
+ );
+ for rotate in 0..64 {
+ let flipped_bits2 = (a ^ (b.rotate_left(rotate))).count_ones();
+ assert!(
+ flipped_bits2 > 10 && flipped_bits2 < 54,
+ "{:x} and {:x}: {:}",
+ a,
+ b.rotate_left(rotate),
+ flipped_bits2
+ );
+ }
+}
+
+fn count_same_bytes_and_nibbles(a: u64, b: u64) -> (i32, i32) {
+ let mut same_byte_count = 0;
+ let mut same_nibble_count = 0;
+ for byte in 0..8 {
+ let ba = (a >> (8 * byte)) as u8;
+ let bb = (b >> (8 * byte)) as u8;
+ if ba == bb {
+ same_byte_count += 1;
+ }
+ if ba & 0xF0u8 == bb & 0xF0u8 {
+ same_nibble_count += 1;
+ }
+ if ba & 0x0Fu8 == bb & 0x0Fu8 {
+ same_nibble_count += 1;
+ }
+ }
+ (same_byte_count, same_nibble_count)
+}
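+
+//Illustrative sanity check for the helper above: identical values share all
+//8 bytes and all 16 nibbles, while complements share none.
+#[test]
+fn test_count_same_bytes_and_nibbles() {
+    assert_eq!((8, 16), count_same_bytes_and_nibbles(0, 0));
+    assert_eq!((0, 0), count_same_bytes_and_nibbles(0, core::u64::MAX));
+}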
+
+fn gen_combinations(options: &[u32; 8], depth: u32, so_far: Vec<u32>, combinations: &mut Vec<Vec<u32>>) {
+ if depth == 0 {
+ return;
+ }
+ for option in options {
+ let mut next = so_far.clone();
+ next.push(*option);
+ combinations.push(next.clone());
+ gen_combinations(options, depth - 1, next, combinations);
+ }
+}
+
+fn test_no_full_collisions<T: Hasher>(gen_hash: impl Fn() -> T) {
+ let options: [u32; 8] = [
+ 0x00000000, 0x20000000, 0x40000000, 0x60000000, 0x80000000, 0xA0000000, 0xC0000000, 0xE0000000,
+ ];
+ let mut combinations = Vec::new();
+ gen_combinations(&options, 7, Vec::new(), &mut combinations);
+ let mut map: HashMap<u64, Vec<u8>> = HashMap::new();
+ for combination in combinations {
+ let array = unsafe {
+ let (begin, middle, end) = combination.align_to::<u8>();
+ assert_eq!(0, begin.len());
+ assert_eq!(0, end.len());
+ middle.to_vec()
+ };
+ let mut hasher = gen_hash();
+ hasher.write(&array);
+ let hash = hasher.finish();
+ if let Some(value) = map.get(&hash) {
+ assert_eq!(
+ value, &array,
+ "Found a collision between {:x?} and {:x?}",
+ value, &array
+ );
+ } else {
+ map.insert(hash, array);
+ }
+ }
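+    //8 options at depths 1..=7 give 8 + 8^2 + ... + 8^7 = 2,396,744 inputs.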
+ assert_eq!(2396744, map.len());
+}
+
+fn test_keys_change_output<T: HasherExt>(constructor: impl Fn(u64, u64) -> T) {
+ let mut a = constructor(1, 1);
+ let mut b = constructor(1, 2);
+ let mut c = constructor(2, 1);
+ let mut d = constructor(2, 2);
+ "test".hash(&mut a);
+ "test".hash(&mut b);
+ "test".hash(&mut c);
+ "test".hash(&mut d);
+ assert_sufficiently_different(a.finish(), b.finish(), 1);
+ assert_sufficiently_different(a.finish(), c.finish(), 1);
+ assert_sufficiently_different(a.finish(), d.finish(), 1);
+ assert_sufficiently_different(b.finish(), c.finish(), 1);
+ assert_sufficiently_different(b.finish(), d.finish(), 1);
+ assert_sufficiently_different(c.finish(), d.finish(), 1);
+}
+
+fn test_input_affect_every_byte<T: HasherExt>(constructor: impl Fn(u64, u64) -> T) {
+ let base = 0.get_hash(constructor(0, 0));
+ for shift in 0..16 {
+        let mut alternatives = vec![];
+        for v in 0..256 {
+            let input = (v as u128) << (shift * 8);
+            let hasher = constructor(0, 0);
+            alternatives.push(input.get_hash(hasher));
+        }
+        assert_each_byte_differs(base, alternatives);
+ }
+}
+
+///Ensures that for every bit in the output there is some value for each byte in the key that flips it.
+fn test_keys_affect_every_byte<H: Hash, T: HasherExt>(item: H, constructor: impl Fn(u64, u64) -> T) {
+ let base = item.get_hash(constructor(0, 0));
+ for shift in 0..8 {
+        let mut alternatives1 = vec![];
+        let mut alternatives2 = vec![];
+        for v in 0..256 {
+            let input = (v as u64) << (shift * 8);
+            let hasher1 = constructor(input, 0);
+            let hasher2 = constructor(0, input);
+            let h1 = item.get_hash(hasher1);
+            let h2 = item.get_hash(hasher2);
+            alternatives1.push(h1);
+            alternatives2.push(h2);
+        }
+        assert_each_byte_differs(base, alternatives1);
+        assert_each_byte_differs(base, alternatives2);
+ }
+}
+
+fn assert_each_byte_differs(base: u64, alternatives: Vec<u64>) {
+    let mut changed_bits = 0_u64;
+    for alternative in alternatives {
+        changed_bits |= base ^ alternative
+ }
+ assert_eq!(core::u64::MAX, changed_bits, "Bits changed: {:x}", changed_bits);
+}
+
+fn test_finish_is_consistent<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
+ let mut hasher = constructor(1, 2);
+ "Foo".hash(&mut hasher);
+ let a = hasher.finish();
+ let b = hasher.finish();
+ assert_eq!(a, b);
+}
+
+fn test_single_key_bit_flip<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
+ for bit in 0..64 {
+ let mut a = constructor(0, 0);
+ let mut b = constructor(0, 1 << bit);
+ let mut c = constructor(1 << bit, 0);
+ "1234".hash(&mut a);
+ "1234".hash(&mut b);
+ "1234".hash(&mut c);
+ assert_sufficiently_different(a.finish(), b.finish(), 2);
+ assert_sufficiently_different(a.finish(), c.finish(), 2);
+ assert_sufficiently_different(b.finish(), c.finish(), 2);
+ let mut a = constructor(0, 0);
+ let mut b = constructor(0, 1 << bit);
+ let mut c = constructor(1 << bit, 0);
+ "12345678".hash(&mut a);
+ "12345678".hash(&mut b);
+ "12345678".hash(&mut c);
+ assert_sufficiently_different(a.finish(), b.finish(), 2);
+ assert_sufficiently_different(a.finish(), c.finish(), 2);
+ assert_sufficiently_different(b.finish(), c.finish(), 2);
+ let mut a = constructor(0, 0);
+ let mut b = constructor(0, 1 << bit);
+ let mut c = constructor(1 << bit, 0);
+ "1234567812345678".hash(&mut a);
+ "1234567812345678".hash(&mut b);
+ "1234567812345678".hash(&mut c);
+ assert_sufficiently_different(a.finish(), b.finish(), 2);
+ assert_sufficiently_different(a.finish(), c.finish(), 2);
+ assert_sufficiently_different(b.finish(), c.finish(), 2);
+ }
+}
+
+fn test_all_bytes_matter<T: HasherExt>(hasher: impl Fn() -> T) {
+ let mut item = vec![0; 256];
+ let base_hash = hash(&item, &hasher);
+ for pos in 0..256 {
+ item[pos] = 255;
+ let hash = hash(&item, &hasher);
+ assert_ne!(base_hash, hash, "Position {} did not affect output", pos);
+ item[pos] = 0;
+ }
+}
+
+fn test_no_pair_collisions<T: HasherExt>(hasher: impl Fn() -> T) {
+ let base = [0_u64, 0_u64];
+ let base_hash = hash(&base, &hasher);
+ for bitpos1 in 0..64 {
+ let a = 1_u64 << bitpos1;
+ for bitpos2 in 0..bitpos1 {
+ let b = 1_u64 << bitpos2;
+ let aa = hash(&[a, a], &hasher);
+ let ab = hash(&[a, b], &hasher);
+ let ba = hash(&[b, a], &hasher);
+ let bb = hash(&[b, b], &hasher);
+ assert_sufficiently_different(base_hash, aa, 3);
+ assert_sufficiently_different(base_hash, ab, 3);
+ assert_sufficiently_different(base_hash, ba, 3);
+ assert_sufficiently_different(base_hash, bb, 3);
+ assert_sufficiently_different(aa, ab, 3);
+ assert_sufficiently_different(ab, ba, 3);
+ assert_sufficiently_different(ba, bb, 3);
+ assert_sufficiently_different(aa, ba, 3);
+ assert_sufficiently_different(ab, bb, 3);
+ assert_sufficiently_different(aa, bb, 3);
+ }
+ }
+}
+
+fn hash<H: Hash, T: HasherExt>(b: &H, hasher: &dyn Fn() -> T) -> u64 {
+ b.get_hash(hasher())
+}
+
+fn test_single_bit_flip<T: HasherExt>(hasher: impl Fn() -> T) {
+ let size = 32;
+ let compare_value = hash(&0u32, &hasher);
+ for pos in 0..size {
+ let test_value = hash(&(1u32 << pos), &hasher);
+ assert_sufficiently_different(compare_value, test_value, 2);
+ }
+ let size = 64;
+ let compare_value = hash(&0u64, &hasher);
+ for pos in 0..size {
+ let test_value = hash(&(1u64 << pos), &hasher);
+ assert_sufficiently_different(compare_value, test_value, 2);
+ }
+ let size = 128;
+ let compare_value = hash(&0u128, &hasher);
+ for pos in 0..size {
+ let test_value = hash(&(1u128 << pos), &hasher);
+ assert_sufficiently_different(compare_value, test_value, 2);
+ }
+}
+
+fn test_padding_does_not_collide<T: Hasher>(hasher: impl Fn() -> T) {
+ for c in 0..128u8 {
+ for string in ["", "\0", "\x01", "1234", "12345678", "1234567812345678"].iter() {
+ let mut short = hasher();
+ string.hash(&mut short);
+ let value = short.finish();
+ let mut padded = string.to_string();
+ for num in 1..=128 {
+ let mut long = hasher();
+ padded.push(c as char);
+ padded.hash(&mut long);
+ let (same_bytes, same_nibbles) = count_same_bytes_and_nibbles(value, long.finish());
+                assert!(
+                    same_bytes <= 3,
+                    "{} bytes of {} -> {:x} vs {:x}",
+                    num, c, value, long.finish()
+                );
+                assert!(
+                    same_nibbles <= 8,
+                    "{} bytes of {} -> {:x} vs {:x}",
+                    num, c, value, long.finish()
+                );
+ let flipped_bits = (value ^ long.finish()).count_ones();
+ assert!(flipped_bits > 10);
+ }
+ if string.len() > 0 {
+ let mut padded = string[1..].to_string();
+ padded.push(c as char);
+ for num in 2..=128 {
+ let mut long = hasher();
+ padded.push(c as char);
+ padded.hash(&mut long);
+ let (same_bytes, same_nibbles) = count_same_bytes_and_nibbles(value, long.finish());
+                    assert!(
+                        same_bytes <= 3,
+                        "string {:?} + {} bytes of {} -> {:x} vs {:x}",
+                        string, num, c, value, long.finish()
+                    );
+                    assert!(
+                        same_nibbles <= 8,
+                        "string {:?} + {} bytes of {} -> {:x} vs {:x}",
+                        string, num, c, value, long.finish()
+                    );
+ let flipped_bits = (value ^ long.finish()).count_ones();
+ assert!(flipped_bits > 10);
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod fallback_tests {
+ use crate::fallback_hash::*;
+ use crate::hash_quality_test::*;
+
+ #[test]
+ fn fallback_single_bit_flip() {
+ test_single_bit_flip(|| AHasher::test_with_keys(0, 0))
+ }
+
+ #[test]
+ fn fallback_single_key_bit_flip() {
+ test_single_key_bit_flip(AHasher::test_with_keys)
+ }
+
+ #[test]
+ fn fallback_all_bytes_matter() {
+ test_all_bytes_matter(|| AHasher::test_with_keys(0, 0));
+ }
+
+ #[test]
+ fn fallback_test_no_pair_collisions() {
+ test_no_pair_collisions(|| AHasher::test_with_keys(0, 0));
+ }
+
+ #[test]
+ fn fallback_test_no_full_collisions() {
+ test_no_full_collisions(|| AHasher::test_with_keys(12345, 67890));
+ }
+
+ #[test]
+ fn fallback_keys_change_output() {
+ test_keys_change_output(AHasher::test_with_keys);
+ }
+
+ #[test]
+ fn fallback_input_affect_every_byte() {
+ test_input_affect_every_byte(AHasher::test_with_keys);
+ }
+
+ #[test]
+ fn fallback_keys_affect_every_byte() {
+ test_keys_affect_every_byte(0, AHasher::test_with_keys);
+ test_keys_affect_every_byte("", AHasher::test_with_keys);
+ test_keys_affect_every_byte((0, 0), AHasher::test_with_keys);
+ }
+
+ #[test]
+    fn fallback_finish_is_consistent() {
+ test_finish_is_consistent(AHasher::test_with_keys)
+ }
+
+ #[test]
+    fn fallback_padding_does_not_collide() {
+        test_padding_does_not_collide(|| AHasher::test_with_keys(0, 0));
+        test_padding_does_not_collide(|| AHasher::test_with_keys(0, 1));
+        test_padding_does_not_collide(|| AHasher::test_with_keys(1, 0));
+        test_padding_does_not_collide(|| AHasher::test_with_keys(1, 1));
+ }
+}
+
+///Basic sanity tests of the crypto properties of aHash.
+#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))]
+#[cfg(test)]
+mod aes_tests {
+ use crate::aes_hash::*;
+ use crate::hash_quality_test::*;
+ use std::hash::{Hash, Hasher};
+
+ const BAD_KEY: u64 = 0x5252_5252_5252_5252; //This encrypts to 0.
+ const BAD_KEY2: u64 = 0x6363_6363_6363_6363; //This decrypts to 0.
+
+ #[test]
+ fn test_single_bit_in_byte() {
+ let mut hasher1 = AHasher::new_with_keys(0, 0);
+ 8_u32.hash(&mut hasher1);
+ let mut hasher2 = AHasher::new_with_keys(0, 0);
+ 0_u32.hash(&mut hasher2);
+ assert_sufficiently_different(hasher1.finish(), hasher2.finish(), 1);
+ }
+
+ #[test]
+ fn aes_single_bit_flip() {
+ test_single_bit_flip(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY));
+ test_single_bit_flip(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2));
+ }
+
+ #[test]
+ fn aes_single_key_bit_flip() {
+ test_single_key_bit_flip(|k1, k2| AHasher::test_with_keys(k1, k2))
+ }
+
+ #[test]
+ fn aes_all_bytes_matter() {
+ test_all_bytes_matter(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY));
+ test_all_bytes_matter(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2));
+ }
+
+ #[test]
+ fn aes_test_no_pair_collisions() {
+ test_no_pair_collisions(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY));
+ test_no_pair_collisions(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2));
+ }
+
+ #[test]
+    fn aes_test_no_full_collisions() {
+ test_no_full_collisions(|| AHasher::test_with_keys(12345, 67890));
+ }
+
+ #[test]
+ fn aes_keys_change_output() {
+ test_keys_change_output(AHasher::test_with_keys);
+ }
+
+ #[test]
+ fn aes_input_affect_every_byte() {
+ test_input_affect_every_byte(AHasher::test_with_keys);
+ }
+
+ #[test]
+ fn aes_keys_affect_every_byte() {
+ test_keys_affect_every_byte(0, AHasher::test_with_keys);
+ test_keys_affect_every_byte("", AHasher::test_with_keys);
+ test_keys_affect_every_byte((0, 0), AHasher::test_with_keys);
+ }
+ #[test]
+    fn aes_finish_is_consistent() {
+ test_finish_is_consistent(AHasher::test_with_keys)
+ }
+
+ #[test]
+    fn aes_padding_does_not_collide() {
+        test_padding_does_not_collide(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY));
+        test_padding_does_not_collide(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2));
+ }
+}
diff --git a/third_party/rust/ahash/src/hash_set.rs b/third_party/rust/ahash/src/hash_set.rs
new file mode 100644
index 0000000000..2950ec9282
--- /dev/null
+++ b/third_party/rust/ahash/src/hash_set.rs
@@ -0,0 +1,267 @@
+use std::collections::{hash_set, HashSet};
+use std::fmt::{self, Debug};
+use std::hash::{BuildHasher, Hash};
+use std::iter::FromIterator;
+use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut, Sub};
+
+/// A [`HashSet`](std::collections::HashSet) using [`RandomState`](crate::RandomState) to hash the items.
+/// Requires the `std` feature to be enabled.
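+///
+/// A usage sketch (illustrative, not from the upstream docs); the wrapper
+/// derefs to the inner `HashSet`:
+/// ```
+/// use ahash::AHashSet;
+///
+/// let mut set: AHashSet<i32> = AHashSet::new();
+/// set.insert(1);
+/// assert!(set.contains(&1));
+/// ```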
+#[derive(Clone)]
+pub struct AHashSet<T, S = crate::RandomState>(HashSet<T, S>);
+
+impl<T, S> AHashSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher + Default,
+{
+ pub fn new() -> Self {
+ AHashSet(HashSet::with_hasher(S::default()))
+ }
+
+ pub fn with_capacity(capacity: usize) -> Self {
+ AHashSet(HashSet::with_capacity_and_hasher(capacity, S::default()))
+ }
+}
+
+impl<T, S> AHashSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher,
+{
+ pub fn with_hasher(hash_builder: S) -> Self {
+ AHashSet(HashSet::with_hasher(hash_builder))
+ }
+
+ pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
+ AHashSet(HashSet::with_capacity_and_hasher(capacity, hash_builder))
+ }
+}
+
+impl<T, S> Deref for AHashSet<T, S> {
+ type Target = HashSet<T, S>;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<T, S> DerefMut for AHashSet<T, S> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl<T, S> PartialEq for AHashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn eq(&self, other: &AHashSet<T, S>) -> bool {
+ self.0.eq(&other.0)
+ }
+}
+
+impl<T, S> Eq for AHashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> BitOr<&AHashSet<T, S>> for &AHashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = AHashSet<T, S>;
+
+ /// Returns the union of `self` and `rhs` as a new `AHashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::AHashSet;
+ ///
+ /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
+ /// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect();
+ ///
+ /// let set = &a | &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [1, 2, 3, 4, 5];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn bitor(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
+ AHashSet(self.0.bitor(&rhs.0))
+ }
+}
+
+impl<T, S> BitAnd<&AHashSet<T, S>> for &AHashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = AHashSet<T, S>;
+
+ /// Returns the intersection of `self` and `rhs` as a new `AHashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::AHashSet;
+ ///
+ /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
+ /// let b: AHashSet<_> = vec![2, 3, 4].into_iter().collect();
+ ///
+ /// let set = &a & &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [2, 3];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn bitand(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
+ AHashSet(self.0.bitand(&rhs.0))
+ }
+}
+
+impl<T, S> BitXor<&AHashSet<T, S>> for &AHashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = AHashSet<T, S>;
+
+ /// Returns the symmetric difference of `self` and `rhs` as a new `AHashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::AHashSet;
+ ///
+ /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
+ /// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect();
+ ///
+ /// let set = &a ^ &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [1, 2, 4, 5];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn bitxor(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
+ AHashSet(self.0.bitxor(&rhs.0))
+ }
+}
+
+impl<T, S> Sub<&AHashSet<T, S>> for &AHashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = AHashSet<T, S>;
+
+ /// Returns the difference of `self` and `rhs` as a new `AHashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::AHashSet;
+ ///
+ /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
+ /// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect();
+ ///
+ /// let set = &a - &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [1, 2];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn sub(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
+ AHashSet(self.0.sub(&rhs.0))
+ }
+}
+
+impl<T, S> Debug for AHashSet<T, S>
+where
+ T: Eq + Hash + Debug,
+ S: BuildHasher,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(fmt)
+ }
+}
+
+impl<T, S> FromIterator<T> for AHashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher + Default,
+{
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> AHashSet<T, S> {
+ AHashSet(HashSet::from_iter(iter))
+ }
+}
+
+impl<'a, T, S> IntoIterator for &'a AHashSet<T, S> {
+ type Item = &'a T;
+ type IntoIter = hash_set::Iter<'a, T>;
+ fn into_iter(self) -> Self::IntoIter {
+ (&self.0).iter()
+ }
+}
+
+impl<T, S> IntoIterator for AHashSet<T, S> {
+ type Item = T;
+ type IntoIter = hash_set::IntoIter<T>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.0.into_iter()
+ }
+}
+
+impl<T, S> Extend<T> for AHashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ self.0.extend(iter)
+ }
+}
+
+impl<'a, T, S> Extend<&'a T> for AHashSet<T, S>
+where
+ T: 'a + Eq + Hash + Copy,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.0.extend(iter)
+ }
+}
+
+impl<T, S> Default for AHashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher + Default,
+{
+ /// Creates an empty `AHashSet<T, S>` with the `Default` value for the hasher.
+ #[inline]
+ fn default() -> AHashSet<T, S> {
+ AHashSet(HashSet::default())
+ }
+}
diff --git a/third_party/rust/ahash/src/lib.rs b/third_party/rust/ahash/src/lib.rs
new file mode 100644
index 0000000000..542fa35e92
--- /dev/null
+++ b/third_party/rust/ahash/src/lib.rs
@@ -0,0 +1,203 @@
+//! # aHash
+//!
+//! This hashing algorithm is intended to be a high-performance, hardware-specific, keyed hash function.
+//! It can be seen as a DoS-resistant alternative to `FxHash`, or a fast equivalent to `SipHash`.
+//! It provides a high-speed hash algorithm, but one whose result is not predictable without knowing the key.
+//! This allows it to be used in a `HashMap` without allowing for the possibility that a malicious user can
+//! induce a collision.
+//!
+//! # How aHash works
+//!
+//! aHash uses the hardware AES instruction on x86 processors to provide a keyed hash function.
+//! aHash is not a cryptographically secure hash.
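+//!
+//! A usage sketch (illustrative; assumes the default `std` feature):
+//! ```
+//! use ahash::RandomState;
+//! use std::collections::HashMap;
+//!
+//! let mut map: HashMap<u32, &str, RandomState> = HashMap::default();
+//! map.insert(7, "seven");
+//! assert_eq!(map.get(&7), Some(&"seven"));
+//! ```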
+#![deny(clippy::correctness, clippy::complexity, clippy::perf)]
+#![allow(clippy::pedantic, clippy::cast_lossless, clippy::unreadable_literal)]
+#![cfg_attr(all(not(test), not(feature = "std")), no_std)]
+#![cfg_attr(feature = "specialize", feature(specialization))]
+
+#[macro_use]
+mod convert;
+
+#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))]
+mod aes_hash;
+mod fallback_hash;
+#[cfg(test)]
+mod hash_quality_test;
+
+mod operations;
+#[cfg(feature = "std")]
+mod hash_map;
+#[cfg(feature = "std")]
+mod hash_set;
+mod random_state;
+mod specialize;
+
+#[cfg(feature = "compile-time-rng")]
+use const_random::const_random;
+
+#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))]
+pub use crate::aes_hash::AHasher;
+
+#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri))))]
+pub use crate::fallback_hash::AHasher;
+pub use crate::random_state::RandomState;
+
+pub use crate::specialize::CallHasher;
+
+#[cfg(feature = "std")]
+pub use crate::hash_map::AHashMap;
+#[cfg(feature = "std")]
+pub use crate::hash_set::AHashSet;
+use core::hash::Hasher;
+
+/// Provides a default [Hasher] with compile-time generated constants for keys.
+/// This is typically used in conjunction with [BuildHasherDefault] to create
+/// [AHasher]s in order to hash the keys of the map.
+///
+/// Generally it is preferable to use [RandomState] instead, so that different
+/// hashmaps will have different keys. However if fixed keys are desirable this
+/// may be used instead.
+///
+/// # Example
+/// ```
+/// use std::hash::BuildHasherDefault;
+/// use ahash::{AHasher, RandomState};
+/// use std::collections::HashMap;
+///
+/// let mut map: HashMap<i32, i32, BuildHasherDefault<AHasher>> = HashMap::default();
+/// map.insert(12, 34);
+/// ```
+///
+/// [BuildHasherDefault]: std::hash::BuildHasherDefault
+/// [Hasher]: std::hash::Hasher
+/// [HashMap]: std::collections::HashMap
+impl Default for AHasher {
+
+    /// Constructs a new [AHasher] with compile-time generated constants for keys if the
+    /// `compile-time-rng` feature is enabled. Otherwise the keys will be fixed constants.
+    /// This means the keys will be the same from one instance to another,
+    /// but different from one build to the next. So if it is possible for a potential
+    /// attacker to have access to the compiled binary, it would be better
+    /// to specify keys generated at runtime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::AHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher_1 = AHasher::default();
+ /// let mut hasher_2 = AHasher::default();
+ ///
+ /// hasher_1.write_u32(1234);
+ /// hasher_2.write_u32(1234);
+ ///
+ /// assert_eq!(hasher_1.finish(), hasher_2.finish());
+ /// ```
+ #[inline]
+ #[cfg(feature = "compile-time-rng")]
+ fn default() -> AHasher {
+ AHasher::new_with_keys(const_random!(u128), const_random!(u128))
+ }
+
+    /// Constructs a new [AHasher] with compile-time generated constants for keys if the
+    /// `compile-time-rng` feature is enabled. Otherwise the keys will be fixed constants.
+    /// This means the keys will be the same from one instance to another,
+    /// but different from one build to the next. So if it is possible for a potential
+    /// attacker to have access to the compiled binary, it would be better
+    /// to specify keys generated at runtime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::AHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher_1 = AHasher::default();
+ /// let mut hasher_2 = AHasher::default();
+ ///
+ /// hasher_1.write_u32(1234);
+ /// hasher_2.write_u32(1234);
+ ///
+ /// assert_eq!(hasher_1.finish(), hasher_2.finish());
+ /// ```
+ #[inline]
+ #[cfg(not(feature = "compile-time-rng"))]
+ fn default() -> AHasher {
+ const K1: u128 = (random_state::INIT_SEED[0] as u128).wrapping_mul(random_state::MULTIPLE as u128);
+ const K2: u128 = (random_state::INIT_SEED[1] as u128).wrapping_mul(random_state::MULTIPLE as u128);
+ AHasher::new_with_keys(K1, K2)
+ }
+}
+
+/// Used for specialization. (Sealed)
+pub(crate) trait HasherExt: Hasher {
+ #[doc(hidden)]
+ fn hash_u64(self, value: u64) -> u64;
+
+ #[doc(hidden)]
+ fn short_finish(&self) -> u64;
+}
+
+impl<T: Hasher> HasherExt for T {
+ #[inline]
+ #[cfg(feature = "specialize")]
+ default fn hash_u64(self, value: u64) -> u64 {
+ value.get_hash(self)
+ }
+ #[inline]
+ #[cfg(not(feature = "specialize"))]
+ fn hash_u64(self, value: u64) -> u64 {
+ value.get_hash(self)
+ }
+ #[inline]
+ #[cfg(feature = "specialize")]
+ default fn short_finish(&self) -> u64 {
+ self.finish()
+ }
+ #[inline]
+ #[cfg(not(feature = "specialize"))]
+ fn short_finish(&self) -> u64 {
+ self.finish()
+ }
+}
+
+// #[inline(never)]
+// #[doc(hidden)]
+// pub fn hash_test(input: &[u8]) -> u64 {
+// let a = AHasher::new_with_keys(11111111111_u128, 2222222222_u128);
+// input.get_hash(a)
+// }
+
+#[cfg(test)]
+mod test {
+ use crate::convert::Convert;
+ use crate::*;
+ use std::collections::HashMap;
+
+ #[cfg(feature = "std")]
+ #[test]
+ fn test_default_builder() {
+ use core::hash::BuildHasherDefault;
+
+ let mut map = HashMap::<u32, u64, BuildHasherDefault<AHasher>>::default();
+ map.insert(1, 3);
+ }
+ #[test]
+ fn test_builder() {
+ let mut map = HashMap::<u32, u64, RandomState>::default();
+ map.insert(1, 3);
+ }
+
+ #[test]
+ fn test_conversion() {
+ let input: &[u8] = b"dddddddd";
+ let bytes: u64 = as_array!(input, 8).convert();
+ assert_eq!(bytes, 0x6464646464646464);
+ }
+
+ #[test]
+ fn test_ahasher_construction() {
+ let _ = AHasher::new_with_keys(1234, 5678);
+ }
+}
diff --git a/third_party/rust/ahash/src/operations.rs b/third_party/rust/ahash/src/operations.rs
new file mode 100644
index 0000000000..0646c446cd
--- /dev/null
+++ b/third_party/rust/ahash/src/operations.rs
@@ -0,0 +1,277 @@
+use crate::convert::*;
+
+/// This is a constant with a lot of special properties, found by automated search.
+/// See the unit tests below. (The commented-out constants below are alternative candidates.)
+#[cfg(all(target_feature = "ssse3", not(miri)))]
+const SHUFFLE_MASK: u128 = 0x020a0700_0c01030e_050f0d08_06090b04_u128;
+//const SHUFFLE_MASK: u128 = 0x000d0702_0a040301_05080f0c_0e0b0609_u128;
+//const SHUFFLE_MASK: u128 = 0x040A0700_030E0106_0D050F08_020B0C09_u128;
+
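+/// Multiplies `s` by `by` as 128-bit values and xors the upper 64 bits of the
+/// product into the lower 64 bits, so the high half's entropy is not discarded.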
+pub(crate) const fn folded_multiply(s: u64, by: u64) -> u64 {
+ let result = (s as u128).wrapping_mul(by as u128);
+ ((result & 0xffff_ffff_ffff_ffff) as u64) ^ ((result >> 64) as u64)
+}
+
+#[inline(always)]
+pub(crate) fn shuffle(a: u128) -> u128 {
+ #[cfg(all(target_feature = "ssse3", not(miri)))]
+ {
+ use core::mem::transmute;
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ unsafe {
+ transmute(_mm_shuffle_epi8(transmute(a), transmute(SHUFFLE_MASK)))
+ }
+ }
+ #[cfg(not(all(target_feature = "ssse3", not(miri))))]
+ {
+ a.swap_bytes()
+ }
+}
+
+#[allow(unused)] //not used by fallback
+#[inline(always)]
+pub(crate) fn add_and_shuffle(a: u128, b: u128) -> u128 {
+ let sum = add_by_64s(a.convert(), b.convert());
+ shuffle(sum.convert())
+}
+
+#[allow(unused)] //not used by fallback
+#[inline(always)]
+pub(crate) fn shuffle_and_add(base: u128, to_add: u128) -> u128 {
+ let shuffled: [u64; 2] = shuffle(base).convert();
+ add_by_64s(shuffled, to_add.convert()).convert()
+}
+
+#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2", not(miri)))]
+#[inline(always)]
+pub(crate) fn add_by_64s(a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
+ use core::mem::transmute;
+ unsafe {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ transmute(_mm_add_epi64(transmute(a), transmute(b)))
+ }
+}
+
+#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2", not(miri))))]
+#[inline(always)]
+pub(crate) fn add_by_64s(a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
+ [a[0].wrapping_add(b[0]), a[1].wrapping_add(b[1])]
+}
+
+#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))]
+#[allow(unused)]
+#[inline(always)]
+pub(crate) fn aesenc(value: u128, xor: u128) -> u128 {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ use core::mem::transmute;
+ unsafe {
+ let value = transmute(value);
+ transmute(_mm_aesenc_si128(value, transmute(xor)))
+ }
+}
+#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))]
+#[allow(unused)]
+#[inline(always)]
+pub(crate) fn aesdec(value: u128, xor: u128) -> u128 {
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ use core::mem::transmute;
+ unsafe {
+ let value = transmute(value);
+ transmute(_mm_aesdec_si128(value, transmute(xor)))
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::convert::Convert;
+
+ // This is code to search for the shuffle constant
+ //
+ //thread_local! { static MASK: Cell<u128> = Cell::new(0); }
+ //
+ // fn shuffle(a: u128) -> u128 {
+ // use std::intrinsics::transmute;
+ // #[cfg(target_arch = "x86")]
+ // use core::arch::x86::*;
+ // #[cfg(target_arch = "x86_64")]
+ // use core::arch::x86_64::*;
+ // MASK.with(|mask| {
+ // unsafe { transmute(_mm_shuffle_epi8(transmute(a), transmute(mask.get()))) }
+ // })
+ // }
+ //
+ // #[test]
+ // fn find_shuffle() {
+ // use rand::prelude::*;
+ // use SliceRandom;
+ // use std::panic;
+ // use std::io::Write;
+ //
+ // let mut value: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ,13, 14, 15];
+ // let mut rand = thread_rng();
+ // let mut successful_list = HashMap::new();
+ // for _attempt in 0..10000000 {
+ // rand.shuffle(&mut value);
+ // let test_val = value.convert();
+ // MASK.with(|mask| {
+ // mask.set(test_val);
+ // });
+ // if let Ok(successful) = panic::catch_unwind(|| {
+ // test_shuffle_does_not_collide_with_aes();
+ // test_shuffle_moves_high_bits();
+ // test_shuffle_moves_every_value();
+ // //test_shuffle_does_not_loop();
+ // value
+ // }) {
+ // let successful: u128 = successful.convert();
+ // successful_list.insert(successful, iters_before_loop());
+ // }
+ // }
+ // let write_file = File::create("/tmp/output").unwrap();
+ // let mut writer = BufWriter::new(&write_file);
+ //
+ // for success in successful_list {
+ // writeln!(writer, "Found successful: {:x?} - {:?}", success.0, success.1);
+ // }
+ // }
+ //
+ // fn iters_before_loop() -> u32 {
+ // let numbered = 0x00112233_44556677_8899AABB_CCDDEEFF;
+ // let mut shuffled = shuffle(numbered);
+ // let mut count = 0;
+ // loop {
+ // // println!("{:>16x}", shuffled);
+ // if numbered == shuffled {
+ // break;
+ // }
+ // count += 1;
+ // shuffled = shuffle(shuffled);
+ // }
+ // count
+ // }
+
+ #[cfg(all(
+ any(target_arch = "x86", target_arch = "x86_64"),
+ target_feature = "ssse3",
+ target_feature = "aes",
+ not(miri)
+ ))]
+ #[test]
+ fn test_shuffle_does_not_collide_with_aes() {
+ let mut value: [u8; 16] = [0; 16];
+ let zero_mask_enc = aesenc(0, 0);
+ let zero_mask_dec = aesdec(0, 0);
+ for index in 0..16 {
+ value[index] = 1;
+ let excluded_positions_enc: [u8; 16] = aesenc(value.convert(), zero_mask_enc).convert();
+ let excluded_positions_dec: [u8; 16] = aesdec(value.convert(), zero_mask_dec).convert();
+ let actual_location: [u8; 16] = shuffle(value.convert()).convert();
+ for pos in 0..16 {
+ if actual_location[pos] != 0 {
+ assert_eq!(
+ 0, excluded_positions_enc[pos],
+ "Forward Overlap between {:?} and {:?} at {}",
+ excluded_positions_enc, actual_location, index
+ );
+ assert_eq!(
+ 0, excluded_positions_dec[pos],
+ "Reverse Overlap between {:?} and {:?} at {}",
+ excluded_positions_dec, actual_location, index
+ );
+ }
+ }
+ value[index] = 0;
+ }
+ }
+
+ #[test]
+ fn test_shuffle_contains_each_value() {
+ let value: [u8; 16] = 0x00010203_04050607_08090A0B_0C0D0E0F_u128.convert();
+ let shuffled: [u8; 16] = shuffle(value.convert()).convert();
+ for index in 0..16_u8 {
+ assert!(shuffled.contains(&index), "Value is missing {}", index);
+ }
+ }
+
+ #[test]
+ fn test_shuffle_moves_every_value() {
+ let mut value: [u8; 16] = [0; 16];
+ for index in 0..16 {
+ value[index] = 1;
+ let shuffled: [u8; 16] = shuffle(value.convert()).convert();
+ assert_eq!(0, shuffled[index], "Value is not moved {}", index);
+ value[index] = 0;
+ }
+ }
+
+ #[test]
+ fn test_shuffle_moves_high_bits() {
+ assert!(
+ shuffle(1) > (1_u128 << 80),
+ "Low bits must be moved to other half {:?} -> {:?}",
+ 0,
+ shuffle(1)
+ );
+
+ assert!(
+ shuffle(1_u128 << 58) >= (1_u128 << 64),
+ "High bits must be moved to other half {:?} -> {:?}",
+ 7,
+ shuffle(1_u128 << 58)
+ );
+ assert!(
+ shuffle(1_u128 << 58) < (1_u128 << 112),
+ "High bits must not remain high {:?} -> {:?}",
+ 7,
+ shuffle(1_u128 << 58)
+ );
+ assert!(
+ shuffle(1_u128 << 64) < (1_u128 << 64),
+ "Low bits must be moved to other half {:?} -> {:?}",
+ 8,
+ shuffle(1_u128 << 64)
+ );
+ assert!(
+ shuffle(1_u128 << 64) >= (1_u128 << 16),
+ "Low bits must not remain low {:?} -> {:?}",
+ 8,
+ shuffle(1_u128 << 64)
+ );
+
+ assert!(
+ shuffle(1_u128 << 120) < (1_u128 << 50),
+ "High bits must be moved to low half {:?} -> {:?}",
+ 15,
+ shuffle(1_u128 << 120)
+ );
+ }
+
+ #[cfg(all(
+ any(target_arch = "x86", target_arch = "x86_64"),
+ target_feature = "ssse3",
+ not(miri)
+ ))]
+ #[test]
+ fn test_shuffle_does_not_loop() {
+ let numbered = 0x00112233_44556677_8899AABB_CCDDEEFF;
+ let mut shuffled = shuffle(numbered);
+ for count in 0..100 {
+ // println!("{:>16x}", shuffled);
+ assert_ne!(numbered, shuffled, "Equal after {} vs {:x}", count, shuffled);
+ shuffled = shuffle(shuffled);
+ }
+ }
+}
diff --git a/third_party/rust/ahash/src/random_state.rs b/third_party/rust/ahash/src/random_state.rs
new file mode 100644
index 0000000000..0936556c38
--- /dev/null
+++ b/third_party/rust/ahash/src/random_state.rs
@@ -0,0 +1,153 @@
+use crate::convert::Convert;
+use crate::AHasher;
+use core::fmt;
+use core::hash::BuildHasher;
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering;
+
+use crate::operations::folded_multiply;
+#[cfg(all(feature = "compile-time-rng", not(test)))]
+use const_random::const_random;
+
+///These constants come from Knuth's PRNG.
+pub(crate) const MULTIPLE: u64 = 6364136223846793005;
+pub(crate) const INCREMENT: u64 = 1442695040888963407;
+
+// Const random provides a randomized starting key with no runtime cost.
+#[cfg(all(feature = "compile-time-rng", not(test)))]
+pub(crate) const INIT_SEED: [u64; 2] = [const_random!(u64), const_random!(u64)];
+
+#[cfg(any(not(feature = "compile-time-rng"), test))]
+pub(crate) const INIT_SEED: [u64; 2] = [0x2360_ED05_1FC6_5DA4, 0x4385_DF64_9FCC_F645]; //From PCG-64
+
+#[cfg(all(feature = "compile-time-rng", not(test)))]
+static SEED: AtomicUsize = AtomicUsize::new(const_random!(u64) as usize);
+
+#[cfg(any(not(feature = "compile-time-rng"), test))]
+static SEED: AtomicUsize = AtomicUsize::new(INCREMENT as usize);
+
+/// Provides a [Hasher] factory. This is typically used (e.g. by [HashMap]) to create
+/// [AHasher]s in order to hash the keys of the map. See `build_hasher` below.
+///
+/// [Hasher]: std::hash::Hasher
+/// [BuildHasher]: std::hash::BuildHasher
+/// [HashMap]: std::collections::HashMap
+#[derive(Clone)]
+pub struct RandomState {
+ pub(crate) k0: u64,
+ pub(crate) k1: u64,
+ pub(crate) k2: u64,
+ pub(crate) k3: u64,
+}
+
+impl fmt::Debug for RandomState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("RandomState { .. }")
+ }
+}
+
+impl RandomState {
+ #[inline]
+ pub fn new() -> RandomState {
+ //Uses the address of a stack variable. When running with ASLR this is a random value.
+ let previous = SEED.load(Ordering::Relaxed) as u64;
+ let stack_mem_loc = &previous as *const _ as u64;
+ //This is similar to the update function in the fallback.
+ //Only one multiply is needed because memory locations are not under an attacker's control.
+ let current_seed = previous
+ .wrapping_add(stack_mem_loc)
+ .wrapping_mul(MULTIPLE)
+ .rotate_right(31);
+ SEED.store(current_seed as usize, Ordering::Relaxed);
+ let (k0, k1, k2, k3) = scramble_keys(&SEED as *const _ as u64, current_seed);
+ RandomState { k0, k1, k2, k3 }
+ }
+
+ /// Allows for explicitly setting the seeds to be used.
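+ ///
+ /// # Example
+ ///
+ /// A minimal sketch: builders created with the same seeds produce
+ /// hashers that agree on the same input.
+ ///
+ /// ```
+ /// use std::hash::{BuildHasher, Hasher};
+ /// use ahash::RandomState;
+ ///
+ /// let mut a = RandomState::with_seeds(123, 456).build_hasher();
+ /// let mut b = RandomState::with_seeds(123, 456).build_hasher();
+ /// a.write_u32(1071);
+ /// b.write_u32(1071);
+ /// // Identical seeds always yield identical hashes for identical input.
+ /// assert_eq!(a.finish(), b.finish());
+ /// ```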
+ pub const fn with_seeds(k0: u64, k1: u64) -> RandomState {
+ let (k0, k1, k2, k3) = scramble_keys(k0, k1);
+ RandomState { k0, k1, k2, k3 }
+ }
+}
+
+/// This is based on the update function of the fallback hasher.
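+/// The four output keys are folded multiplies of the seeds against the
+/// compile-time `INIT_SEED`, each rotated by an amount derived from both seeds.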
+#[inline]
+pub(crate) const fn scramble_keys(a: u64, b: u64) -> (u64, u64, u64, u64) {
+ let k1 = folded_multiply(INIT_SEED[0] ^ a, MULTIPLE).wrapping_add(b);
+ let k2 = folded_multiply(INIT_SEED[0] ^ b, MULTIPLE).wrapping_add(a);
+ let k3 = folded_multiply(INIT_SEED[1] ^ a, MULTIPLE).wrapping_add(b);
+ let k4 = folded_multiply(INIT_SEED[1] ^ b, MULTIPLE).wrapping_add(a);
+ let combined = folded_multiply(a ^ b, MULTIPLE).wrapping_add(INCREMENT);
+ let rot1 = (combined & 63) as u32;
+ let rot2 = ((combined >> 16) & 63) as u32;
+ let rot3 = ((combined >> 32) & 63) as u32;
+ let rot4 = ((combined >> 48) & 63) as u32;
+ (
+ k1.rotate_left(rot1),
+ k2.rotate_left(rot2),
+ k3.rotate_left(rot3),
+ k4.rotate_left(rot4),
+ )
+}
+
+impl Default for RandomState {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl BuildHasher for RandomState {
+ type Hasher = AHasher;
+
+ /// Constructs a new [AHasher] with keys based on compile-time-generated constants** and the location
+ /// in memory at which this object was constructed. This means that two different [BuildHasher]s will generate
+ /// [AHasher]s that return different hashcodes, but [Hasher]s created from the same [BuildHasher]
+ /// will generate the same hashes for the same input data.
+ ///
+ /// ** - only if the `compile-time-rng` feature is enabled.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use ahash::{AHasher, RandomState};
+ /// use std::hash::{Hasher, BuildHasher};
+ ///
+ /// let build_hasher = RandomState::new();
+ /// let mut hasher_1 = build_hasher.build_hasher();
+ /// let mut hasher_2 = build_hasher.build_hasher();
+ ///
+ /// hasher_1.write_u32(1234);
+ /// hasher_2.write_u32(1234);
+ ///
+ /// assert_eq!(hasher_1.finish(), hasher_2.finish());
+ ///
+ /// let other_build_hasher = RandomState::new();
+ /// let mut different_hasher = other_build_hasher.build_hasher();
+ /// different_hasher.write_u32(1234);
+ /// assert_ne!(different_hasher.finish(), hasher_1.finish());
+ /// ```
+ /// [Hasher]: std::hash::Hasher
+ /// [BuildHasher]: std::hash::BuildHasher
+ /// [HashMap]: std::collections::HashMap
+ #[inline]
+ fn build_hasher(&self) -> AHasher {
+ AHasher::new_with_keys([self.k0, self.k1].convert(), [self.k2, self.k3].convert())
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_const_rand_disabled() {
+ assert_eq!(INIT_SEED, [0x2360_ED05_1FC6_5DA4, 0x4385_DF64_9FCC_F645]);
+ }
+
+ #[test]
+ fn test_with_seeds_const() {
+ const _CONST_RANDOM_STATE: RandomState = RandomState::with_seeds(17, 19);
+ }
+}
diff --git a/third_party/rust/ahash/src/specialize.rs b/third_party/rust/ahash/src/specialize.rs
new file mode 100644
index 0000000000..2c0bc2d8af
--- /dev/null
+++ b/third_party/rust/ahash/src/specialize.rs
@@ -0,0 +1,162 @@
+#[cfg(feature = "specialize")]
+use crate::HasherExt;
+use core::hash::Hash;
+use core::hash::Hasher;
+
+/// Provides a way to get an optimized hasher for a given data type.
+/// Rather than using a `Hasher` generically, which can hash any value, this provides a way to get a specialized hash
+/// for a specific type, which may be faster for primitive types. It does, however, consume the hasher in the process.
+///
+/// # Example
+/// ```
+/// use std::hash::BuildHasher;
+/// use ahash::RandomState;
+/// use ahash::CallHasher;
+///
+/// let hash_builder = RandomState::new();
+/// //...
+/// let value = 17;
+/// let hash = value.get_hash(hash_builder.build_hasher());
+/// ```
+pub trait CallHasher: Hash {
+ fn get_hash<H: Hasher>(&self, hasher: H) -> u64;
+}
+
+#[cfg(not(feature = "specialize"))]
+impl<T> CallHasher for T
+where
+ T: Hash,
+{
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ self.hash(&mut hasher);
+ hasher.finish()
+ }
+}
+
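+// The `default fn` below relies on Rust's unstable specialization support,
+// so the "specialize" cargo feature presumably requires a nightly compiler.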
+#[cfg(feature = "specialize")]
+impl<T> CallHasher for T
+where
+ T: Hash,
+{
+ #[inline]
+ default fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ self.hash(&mut hasher);
+ hasher.finish()
+ }
+}
+
+macro_rules! call_hasher_impl {
+ ($typ:ty) => {
+ #[cfg(feature = "specialize")]
+ impl CallHasher for $typ {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, hasher: H) -> u64 {
+ hasher.hash_u64(*self as u64)
+ }
+ }
+ };
+}
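+
+// Generate specialized impls for the integer types that fit in a u64;
+// these route through `hash_u64` rather than the generic `Hash` machinery.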
+call_hasher_impl!(u8);
+call_hasher_impl!(u16);
+call_hasher_impl!(u32);
+call_hasher_impl!(u64);
+call_hasher_impl!(i8);
+call_hasher_impl!(i16);
+call_hasher_impl!(i32);
+call_hasher_impl!(i64);
+
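+// 128-bit values write via `write_u128` and use `HasherExt::short_finish`
+// rather than the full `finish`.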
+#[cfg(feature = "specialize")]
+impl CallHasher for u128 {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ hasher.write_u128(*self);
+ hasher.short_finish()
+ }
+}
+
+#[cfg(feature = "specialize")]
+impl CallHasher for i128 {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ hasher.write_u128(*self as u128);
+ hasher.short_finish()
+ }
+}
+
+#[cfg(feature = "specialize")]
+impl CallHasher for [u8] {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ hasher.write(self);
+ hasher.finish()
+ }
+}
+
+#[cfg(all(feature = "specialize", feature = "std"))]
+impl CallHasher for Vec<u8> {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ hasher.write(self);
+ hasher.finish()
+ }
+}
+
+#[cfg(feature = "specialize")]
+impl CallHasher for str {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ hasher.write(self.as_bytes());
+ hasher.finish()
+ }
+}
+
+#[cfg(all(feature = "specialize", feature = "std"))]
+impl CallHasher for String {
+ #[inline]
+ fn get_hash<H: Hasher>(&self, mut hasher: H) -> u64 {
+ hasher.write(self.as_bytes());
+ hasher.finish()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::*;
+
+ #[test]
+ #[cfg(feature = "specialize")]
+ pub fn test_specialized_invoked() {
+ let shortened = 0_u64.get_hash(AHasher::new_with_keys(1, 2));
+ let mut hasher = AHasher::new_with_keys(1, 2);
+ 0_u64.hash(&mut hasher);
+ assert_ne!(hasher.finish(), shortened);
+ }
+
+ /// Tests that some non-trivial transformation takes place.
+ #[test]
+ pub fn test_input_processed() {
+ let hasher = || AHasher::new_with_keys(3, 2);
+ assert_ne!(0, 0_u64.get_hash(hasher()));
+ assert_ne!(1, 0_u64.get_hash(hasher()));
+ assert_ne!(2, 0_u64.get_hash(hasher()));
+ assert_ne!(3, 0_u64.get_hash(hasher()));
+ assert_ne!(4, 0_u64.get_hash(hasher()));
+ assert_ne!(5, 0_u64.get_hash(hasher()));
+
+ assert_ne!(0, 1_u64.get_hash(hasher()));
+ assert_ne!(1, 1_u64.get_hash(hasher()));
+ assert_ne!(2, 1_u64.get_hash(hasher()));
+ assert_ne!(3, 1_u64.get_hash(hasher()));
+ assert_ne!(4, 1_u64.get_hash(hasher()));
+ assert_ne!(5, 1_u64.get_hash(hasher()));
+
+ let xored = 0_u64.get_hash(hasher()) ^ 1_u64.get_hash(hasher());
+ assert_ne!(0, xored);
+ assert_ne!(1, xored);
+ assert_ne!(2, xored);
+ assert_ne!(3, xored);
+ assert_ne!(4, xored);
+ assert_ne!(5, xored);
+ }
+}