path: root/third_party/rust/siphasher/src
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/siphasher/src
parent    Initial commit.
Adding upstream version 115.7.0esr. (tag: upstream/115.7.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/siphasher/src')
-rw-r--r--  third_party/rust/siphasher/src/lib.rs       |  24
-rw-r--r--  third_party/rust/siphasher/src/sip.rs       | 569
-rw-r--r--  third_party/rust/siphasher/src/sip128.rs    | 671
-rw-r--r--  third_party/rust/siphasher/src/tests.rs     | 309
-rw-r--r--  third_party/rust/siphasher/src/tests128.rs  | 103
5 files changed, 1676 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/siphasher/src/lib.rs b/third_party/rust/siphasher/src/lib.rs
new file mode 100644
index 0000000000..71e59deb5b
--- /dev/null
+++ b/third_party/rust/siphasher/src/lib.rs
@@ -0,0 +1,24 @@
+#![cfg_attr(not(test), no_std)]
+#![allow(clippy::unreadable_literal)]
+#![allow(clippy::cast_lossless)]
+#![allow(clippy::many_single_char_names)]
+
+pub mod sip;
+pub mod sip128;
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(test)]
+mod tests128;
+
+#[cfg(any(feature = "serde", feature = "serde_std", feature = "serde_no_std"))]
+pub mod reexports {
+ pub use serde;
+}
+
+pub mod prelude {
+ pub use crate::{sip, sip128};
+ pub use core::hash::Hasher as _;
+ pub use sip128::Hasher128 as _;
+}
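
The lib.rs above only wires up the modules and a prelude. As a hedged usage sketch (not part of the vendored sources), downstream code could drive the 64-bit hashers through the standard Hasher trait like this, assuming the crate is available under its crate name `siphasher`:

    use siphasher::sip::SipHasher13;
    use std::hash::Hasher;

    fn main() {
        // Keys would normally come from a strong RNG, per the module docs in sip.rs below.
        let mut hasher = SipHasher13::new_with_keys(0x0706050403020100, 0x0f0e0d0c0b0a0908);
        hasher.write(b"hello world");
        let h: u64 = hasher.finish();
        println!("siphash-1-3 = {:016x}", h);
    }
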
diff --git a/third_party/rust/siphasher/src/sip.rs b/third_party/rust/siphasher/src/sip.rs
new file mode 100644
index 0000000000..df6be1e799
--- /dev/null
+++ b/third_party/rust/siphasher/src/sip.rs
@@ -0,0 +1,569 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! An implementation of SipHash.
+
+use core::cmp;
+use core::hash;
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr;
+use core::u64;
+
+/// An implementation of SipHash 1-3.
+///
+/// See: <https://www.aumasson.jp/siphash/siphash.pdf>
+#[derive(Debug, Clone, Copy, Default)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct SipHasher13 {
+ hasher: Hasher<Sip13Rounds>,
+}
+
+/// An implementation of SipHash 2-4.
+///
+/// See: <https://www.aumasson.jp/siphash/siphash.pdf>
+#[derive(Debug, Clone, Copy, Default)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct SipHasher24 {
+ hasher: Hasher<Sip24Rounds>,
+}
+
+/// An implementation of SipHash 2-4.
+///
+/// See: <https://www.aumasson.jp/siphash/siphash.pdf>
+///
+/// SipHash is a general-purpose hashing function: it runs at a good
+/// speed (competitive with Spooky and City) and permits strong _keyed_
+/// hashing. This lets you key your hashtables from a strong RNG, such as
+/// [`rand::os::OsRng`](https://doc.rust-lang.org/rand/rand/os/struct.OsRng.html).
+///
+/// Although the SipHash algorithm is considered to be generally strong,
+/// it is not intended for cryptographic purposes. As such, all
+/// cryptographic uses of this implementation are _strongly discouraged_.
+#[derive(Debug, Clone, Copy, Default)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct SipHasher(SipHasher24);
+
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+struct Hasher<S: Sip> {
+ k0: u64,
+ k1: u64,
+ length: usize, // how many bytes we've processed
+ state: State, // hash State
+ tail: u64, // unprocessed bytes le
+ ntail: usize, // how many bytes in tail are valid
+ _marker: PhantomData<S>,
+}
+
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+struct State {
+ // v0, v2 and v1, v3 show up in pairs in the algorithm,
+ // and simd implementations of SipHash will use vectors
+ // of v02 and v13. By placing them in this order in the struct,
+ // the compiler can pick up on just a few simd optimizations by itself.
+ v0: u64,
+ v2: u64,
+ v1: u64,
+ v3: u64,
+}
+
+macro_rules! compress {
+ ($state:expr) => {{
+ compress!($state.v0, $state.v1, $state.v2, $state.v3)
+ }};
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
+ $v0 = $v0.wrapping_add($v1);
+ $v1 = $v1.rotate_left(13);
+ $v1 ^= $v0;
+ $v0 = $v0.rotate_left(32);
+ $v2 = $v2.wrapping_add($v3);
+ $v3 = $v3.rotate_left(16);
+ $v3 ^= $v2;
+ $v0 = $v0.wrapping_add($v3);
+ $v3 = $v3.rotate_left(21);
+ $v3 ^= $v0;
+ $v2 = $v2.wrapping_add($v1);
+ $v1 = $v1.rotate_left(17);
+ $v1 ^= $v2;
+ $v2 = $v2.rotate_left(32);
+ }};
+}
+
+/// Loads an integer of the desired type from a byte stream, in LE order. Uses
+/// `copy_nonoverlapping` to let the compiler generate the most efficient way
+/// to load it from a possibly unaligned address.
+///
+/// Unsafe because: unchecked indexing at `i..i+size_of(int_ty)`
+macro_rules! load_int_le {
+ ($buf:expr, $i:expr, $int_ty:ident) => {{
+ debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+ let mut data = 0 as $int_ty;
+ ptr::copy_nonoverlapping(
+ $buf.as_ptr().add($i),
+ &mut data as *mut _ as *mut u8,
+ mem::size_of::<$int_ty>(),
+ );
+ data.to_le()
+ }};
+}
+
+/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
+/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
+/// sizes and avoid calling `memcpy`, which is good for speed.
+///
+/// Unsafe because: unchecked indexing at start..start+len
+#[inline]
+unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+ debug_assert!(len < 8);
+ let mut i = 0; // current byte index (from LSB) in the output u64
+ let mut out = 0;
+ if i + 3 < len {
+ out = load_int_le!(buf, start + i, u32) as u64;
+ i += 4;
+ }
+ if i + 1 < len {
+ out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
+ i += 2
+ }
+ if i < len {
+ out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
+ i += 1;
+ }
+ debug_assert_eq!(i, len);
+ out
+}
+
+impl SipHasher {
+ /// Creates a new `SipHasher` with the two initial keys set to 0.
+ #[inline]
+ pub fn new() -> SipHasher {
+ SipHasher::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher` that is keyed off the provided keys.
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
+ SipHasher(SipHasher24::new_with_keys(key0, key1))
+ }
+
+ /// Creates a `SipHasher` from a 16 byte key.
+ pub fn new_with_key(key: &[u8; 16]) -> SipHasher {
+ let mut b0 = [0u8; 8];
+ let mut b1 = [0u8; 8];
+ b0.copy_from_slice(&key[0..8]);
+ b1.copy_from_slice(&key[8..16]);
+ let key0 = u64::from_le_bytes(b0);
+ let key1 = u64::from_le_bytes(b1);
+ Self::new_with_keys(key0, key1)
+ }
+
+ /// Get the keys used by this hasher
+ pub fn keys(&self) -> (u64, u64) {
+ (self.0.hasher.k0, self.0.hasher.k1)
+ }
+
+ /// Get the key used by this hasher as a 16 byte vector
+ pub fn key(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ bytes[0..8].copy_from_slice(&self.0.hasher.k0.to_le_bytes());
+ bytes[8..16].copy_from_slice(&self.0.hasher.k1.to_le_bytes());
+ bytes
+ }
+}
+
+impl SipHasher13 {
+ /// Creates a new `SipHasher13` with the two initial keys set to 0.
+ #[inline]
+ pub fn new() -> SipHasher13 {
+ SipHasher13::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher13` that is keyed off the provided keys.
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
+ SipHasher13 {
+ hasher: Hasher::new_with_keys(key0, key1),
+ }
+ }
+
+ /// Creates a `SipHasher13` from a 16 byte key.
+ pub fn new_with_key(key: &[u8; 16]) -> SipHasher13 {
+ let mut b0 = [0u8; 8];
+ let mut b1 = [0u8; 8];
+ b0.copy_from_slice(&key[0..8]);
+ b1.copy_from_slice(&key[8..16]);
+ let key0 = u64::from_le_bytes(b0);
+ let key1 = u64::from_le_bytes(b1);
+ Self::new_with_keys(key0, key1)
+ }
+
+ /// Get the keys used by this hasher
+ pub fn keys(&self) -> (u64, u64) {
+ (self.hasher.k0, self.hasher.k1)
+ }
+
+ /// Get the key used by this hasher as a 16 byte vector
+ pub fn key(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ bytes[0..8].copy_from_slice(&self.hasher.k0.to_le_bytes());
+ bytes[8..16].copy_from_slice(&self.hasher.k1.to_le_bytes());
+ bytes
+ }
+}
+
+impl SipHasher24 {
+ /// Creates a new `SipHasher24` with the two initial keys set to 0.
+ #[inline]
+ pub fn new() -> SipHasher24 {
+ SipHasher24::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher24` that is keyed off the provided keys.
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher24 {
+ SipHasher24 {
+ hasher: Hasher::new_with_keys(key0, key1),
+ }
+ }
+
+ /// Creates a `SipHasher24` from a 16 byte key.
+ pub fn new_with_key(key: &[u8; 16]) -> SipHasher24 {
+ let mut b0 = [0u8; 8];
+ let mut b1 = [0u8; 8];
+ b0.copy_from_slice(&key[0..8]);
+ b1.copy_from_slice(&key[8..16]);
+ let key0 = u64::from_le_bytes(b0);
+ let key1 = u64::from_le_bytes(b1);
+ Self::new_with_keys(key0, key1)
+ }
+
+ /// Get the keys used by this hasher
+ pub fn keys(&self) -> (u64, u64) {
+ (self.hasher.k0, self.hasher.k1)
+ }
+
+ /// Get the key used by this hasher as a 16 byte vector
+ pub fn key(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ bytes[0..8].copy_from_slice(&self.hasher.k0.to_le_bytes());
+ bytes[8..16].copy_from_slice(&self.hasher.k1.to_le_bytes());
+ bytes
+ }
+}
+
+impl<S: Sip> Hasher<S> {
+ #[inline]
+ fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
+ let mut state = Hasher {
+ k0: key0,
+ k1: key1,
+ length: 0,
+ state: State {
+ v0: 0,
+ v1: 0,
+ v2: 0,
+ v3: 0,
+ },
+ tail: 0,
+ ntail: 0,
+ _marker: PhantomData,
+ };
+ state.reset();
+ state
+ }
+
+ #[inline]
+ fn reset(&mut self) {
+ self.length = 0;
+ self.state.v0 = self.k0 ^ 0x736f6d6570736575;
+ self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
+ self.state.v2 = self.k0 ^ 0x6c7967656e657261;
+ self.state.v3 = self.k1 ^ 0x7465646279746573;
+ self.ntail = 0;
+ }
+
+ // A specialized write function for values with size <= 8.
+ //
+ // The hashing of multi-byte integers depends on endianness. E.g.:
+ // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
+ // - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
+ //
+ // This function does the right thing for little-endian hardware. On
+ // big-endian hardware `x` must be byte-swapped first to give the right
+ // behaviour. After any byte-swapping, the input must be zero-extended to
+ // 64-bits. The caller is responsible for the byte-swapping and
+ // zero-extension.
+ #[inline]
+ fn short_write<T>(&mut self, _x: T, x: u64) {
+ let size = mem::size_of::<T>();
+ self.length += size;
+
+ // The original number must be zero-extended, not sign-extended.
+ debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
+
+ // The number of bytes needed to fill `self.tail`.
+ let needed = 8 - self.ntail;
+
+ self.tail |= x << (8 * self.ntail);
+ if size < needed {
+ self.ntail += size;
+ return;
+ }
+
+ // `self.tail` is full, process it.
+ self.state.v3 ^= self.tail;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= self.tail;
+
+ self.ntail = size - needed;
+ self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
+ }
+}
+
+impl hash::Hasher for SipHasher {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.0.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0.finish()
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.0.write_usize(i);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.0.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.0.write_u16(i);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.0.write_u32(i);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.0.write_u64(i);
+ }
+}
+
+impl hash::Hasher for SipHasher13 {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.hasher.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.hasher.finish()
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.hasher.write_usize(i);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.hasher.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.hasher.write_u16(i);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.hasher.write_u32(i);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.hasher.write_u64(i);
+ }
+}
+
+impl hash::Hasher for SipHasher24 {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.hasher.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.hasher.finish()
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.hasher.write_usize(i);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.hasher.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.hasher.write_u16(i);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.hasher.write_u32(i);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.hasher.write_u64(i);
+ }
+}
+
+impl<S: Sip> hash::Hasher for Hasher<S> {
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.short_write(i, i.to_le() as u64);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.short_write(i, i as u64);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.short_write(i, i.to_le() as u64);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.short_write(i, i.to_le() as u64);
+ }
+
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ let length = msg.len();
+ self.length += length;
+
+ let mut needed = 0;
+
+ if self.ntail != 0 {
+ needed = 8 - self.ntail;
+ self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+ if length < needed {
+ self.ntail += length;
+ return;
+ } else {
+ self.state.v3 ^= self.tail;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= self.tail;
+ self.ntail = 0;
+ }
+ }
+
+ // Buffered tail is now flushed, process new input.
+ let len = length - needed;
+ let left = len & 0x7;
+
+ let mut i = needed;
+ while i < len - left {
+ let mi = unsafe { load_int_le!(msg, i, u64) };
+
+ self.state.v3 ^= mi;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= mi;
+
+ i += 8;
+ }
+
+ self.tail = unsafe { u8to64_le(msg, i, left) };
+ self.ntail = left;
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ let mut state = self.state;
+
+ let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
+
+ state.v3 ^= b;
+ S::c_rounds(&mut state);
+ state.v0 ^= b;
+
+ state.v2 ^= 0xff;
+ S::d_rounds(&mut state);
+
+ state.v0 ^ state.v1 ^ state.v2 ^ state.v3
+ }
+}
+
+impl<S: Sip> Default for Hasher<S> {
+ /// Creates a `Hasher<S>` with the two initial keys set to 0.
+ #[inline]
+ fn default() -> Hasher<S> {
+ Hasher::new_with_keys(0, 0)
+ }
+}
+
+#[doc(hidden)]
+trait Sip {
+ fn c_rounds(_: &mut State);
+ fn d_rounds(_: &mut State);
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct Sip13Rounds;
+
+impl Sip for Sip13Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct Sip24Rounds;
+
+impl Sip for Sip24Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
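
The `short_write` comment above spells out the endianness contract: writing a multi-byte integer hashes the same bytes as writing its little-endian byte representation. A small hedged sketch of that equivalence (illustrative only, not part of the vendored tree):

    use siphasher::sip::SipHasher24;
    use std::hash::Hasher;

    fn main() {
        // write_u32(0xDDCCBBAA) feeds the bytes AA BB CC DD into the state,
        // exactly like writing the explicit little-endian byte slice.
        let mut a = SipHasher24::new_with_keys(1, 2);
        a.write_u32(0xDDCCBBAA);
        let mut b = SipHasher24::new_with_keys(1, 2);
        b.write(&[0xAA, 0xBB, 0xCC, 0xDD]);
        assert_eq!(a.finish(), b.finish());
    }
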
diff --git a/third_party/rust/siphasher/src/sip128.rs b/third_party/rust/siphasher/src/sip128.rs
new file mode 100644
index 0000000000..cabf8e3a3a
--- /dev/null
+++ b/third_party/rust/siphasher/src/sip128.rs
@@ -0,0 +1,671 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! An implementation of SipHash with a 128-bit output.
+
+use core::cmp;
+use core::hash;
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr;
+use core::u64;
+
+/// A 128-bit (2x64) hash output
+#[derive(Debug, Clone, Copy, Default)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct Hash128 {
+ pub h1: u64,
+ pub h2: u64,
+}
+
+impl From<u128> for Hash128 {
+ fn from(v: u128) -> Self {
+ Hash128 {
+ h1: v as u64,
+ h2: (v >> 64) as u64,
+ }
+ }
+}
+
+impl From<Hash128> for u128 {
+ fn from(h: Hash128) -> u128 {
+ (h.h1 as u128) | ((h.h2 as u128) << 64)
+ }
+}
+
+/// An implementation of SipHash128 1-3.
+#[derive(Debug, Clone, Copy, Default)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct SipHasher13 {
+ hasher: Hasher<Sip13Rounds>,
+}
+
+/// An implementation of SipHash128 2-4.
+#[derive(Debug, Clone, Copy, Default)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct SipHasher24 {
+ hasher: Hasher<Sip24Rounds>,
+}
+
+/// An implementation of SipHash128 2-4.
+///
+/// SipHash is a general-purpose hashing function: it runs at a good
+/// speed (competitive with Spooky and City) and permits strong _keyed_
+/// hashing. This lets you key your hashtables from a strong RNG, such as
+/// [`rand::os::OsRng`](https://doc.rust-lang.org/rand/rand/os/struct.OsRng.html).
+///
+/// Although the SipHash algorithm is considered to be generally strong,
+/// it is not intended for cryptographic purposes. As such, all
+/// cryptographic uses of this implementation are _strongly discouraged_.
+#[derive(Debug, Clone, Copy, Default)]
+pub struct SipHasher(SipHasher24);
+
+#[derive(Debug, Copy)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+struct Hasher<S: Sip> {
+ k0: u64,
+ k1: u64,
+ length: usize, // how many bytes we've processed
+ state: State, // hash State
+ tail: u64, // unprocessed bytes le
+ ntail: usize, // how many bytes in tail are valid
+ _marker: PhantomData<S>,
+}
+
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+struct State {
+ // v0, v2 and v1, v3 show up in pairs in the algorithm,
+ // and simd implementations of SipHash will use vectors
+ // of v02 and v13. By placing them in this order in the struct,
+ // the compiler can pick up on just a few simd optimizations by itself.
+ v0: u64,
+ v2: u64,
+ v1: u64,
+ v3: u64,
+}
+
+macro_rules! compress {
+ ($state:expr) => {{
+ compress!($state.v0, $state.v1, $state.v2, $state.v3)
+ }};
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
+ $v0 = $v0.wrapping_add($v1);
+ $v1 = $v1.rotate_left(13);
+ $v1 ^= $v0;
+ $v0 = $v0.rotate_left(32);
+ $v2 = $v2.wrapping_add($v3);
+ $v3 = $v3.rotate_left(16);
+ $v3 ^= $v2;
+ $v0 = $v0.wrapping_add($v3);
+ $v3 = $v3.rotate_left(21);
+ $v3 ^= $v0;
+ $v2 = $v2.wrapping_add($v1);
+ $v1 = $v1.rotate_left(17);
+ $v1 ^= $v2;
+ $v2 = $v2.rotate_left(32);
+ }};
+}
+
+/// Loads an integer of the desired type from a byte stream, in LE order. Uses
+/// `copy_nonoverlapping` to let the compiler generate the most efficient way
+/// to load it from a possibly unaligned address.
+///
+/// Unsafe because: unchecked indexing at `i..i+size_of(int_ty)`
+macro_rules! load_int_le {
+ ($buf:expr, $i:expr, $int_ty:ident) => {{
+ debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+ let mut data = 0 as $int_ty;
+ ptr::copy_nonoverlapping(
+ $buf.as_ptr().add($i),
+ &mut data as *mut _ as *mut u8,
+ mem::size_of::<$int_ty>(),
+ );
+ data.to_le()
+ }};
+}
+
+/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
+/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
+/// sizes and avoid calling `memcpy`, which is good for speed.
+///
+/// Unsafe because: unchecked indexing at start..start+len
+#[inline]
+unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+ debug_assert!(len < 8);
+ let mut i = 0; // current byte index (from LSB) in the output u64
+ let mut out = 0;
+ if i + 3 < len {
+ out = load_int_le!(buf, start + i, u32) as u64;
+ i += 4;
+ }
+ if i + 1 < len {
+ out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
+ i += 2
+ }
+ if i < len {
+ out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
+ i += 1;
+ }
+ debug_assert_eq!(i, len);
+ out
+}
+
+pub trait Hasher128 {
+ /// Return a 128-bit hash
+ fn finish128(&self) -> Hash128;
+}
+
+impl SipHasher {
+ /// Creates a new `SipHasher` with the two initial keys set to 0.
+ #[inline]
+ pub fn new() -> SipHasher {
+ SipHasher::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher` that is keyed off the provided keys.
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
+ SipHasher(SipHasher24::new_with_keys(key0, key1))
+ }
+
+ /// Creates a `SipHasher` from a 16 byte key.
+ pub fn new_with_key(key: &[u8; 16]) -> SipHasher {
+ let mut b0 = [0u8; 8];
+ let mut b1 = [0u8; 8];
+ b0.copy_from_slice(&key[0..8]);
+ b1.copy_from_slice(&key[8..16]);
+ let key0 = u64::from_le_bytes(b0);
+ let key1 = u64::from_le_bytes(b1);
+ Self::new_with_keys(key0, key1)
+ }
+
+ /// Get the keys used by this hasher
+ pub fn keys(&self) -> (u64, u64) {
+ (self.0.hasher.k0, self.0.hasher.k1)
+ }
+
+ /// Get the key used by this hasher as a 16 byte vector
+ pub fn key(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ bytes[0..8].copy_from_slice(&self.0.hasher.k0.to_le_bytes());
+ bytes[8..16].copy_from_slice(&self.0.hasher.k1.to_le_bytes());
+ bytes
+ }
+}
+
+impl Hasher128 for SipHasher {
+ /// Return a 128-bit hash
+ #[inline]
+ fn finish128(&self) -> Hash128 {
+ self.0.finish128()
+ }
+}
+
+impl SipHasher13 {
+ /// Creates a new `SipHasher13` with the two initial keys set to 0.
+ #[inline]
+ pub fn new() -> SipHasher13 {
+ SipHasher13::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher13` that is keyed off the provided keys.
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
+ SipHasher13 {
+ hasher: Hasher::new_with_keys(key0, key1),
+ }
+ }
+
+ /// Creates a `SipHasher13` from a 16 byte key.
+ pub fn new_with_key(key: &[u8; 16]) -> SipHasher13 {
+ let mut b0 = [0u8; 8];
+ let mut b1 = [0u8; 8];
+ b0.copy_from_slice(&key[0..8]);
+ b1.copy_from_slice(&key[8..16]);
+ let key0 = u64::from_le_bytes(b0);
+ let key1 = u64::from_le_bytes(b1);
+ Self::new_with_keys(key0, key1)
+ }
+
+ /// Get the keys used by this hasher
+ pub fn keys(&self) -> (u64, u64) {
+ (self.hasher.k0, self.hasher.k1)
+ }
+
+ /// Get the key used by this hasher as a 16 byte vector
+ pub fn key(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ bytes[0..8].copy_from_slice(&self.hasher.k0.to_le_bytes());
+ bytes[8..16].copy_from_slice(&self.hasher.k1.to_le_bytes());
+ bytes
+ }
+}
+
+impl Hasher128 for SipHasher13 {
+ /// Return a 128-bit hash
+ #[inline]
+ fn finish128(&self) -> Hash128 {
+ self.hasher.finish128()
+ }
+}
+
+impl SipHasher24 {
+ /// Creates a new `SipHasher24` with the two initial keys set to 0.
+ #[inline]
+ pub fn new() -> SipHasher24 {
+ SipHasher24::new_with_keys(0, 0)
+ }
+
+ /// Creates a `SipHasher24` that is keyed off the provided keys.
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher24 {
+ SipHasher24 {
+ hasher: Hasher::new_with_keys(key0, key1),
+ }
+ }
+
+ /// Creates a `SipHasher24` from a 16 byte key.
+ pub fn new_with_key(key: &[u8; 16]) -> SipHasher24 {
+ let mut b0 = [0u8; 8];
+ let mut b1 = [0u8; 8];
+ b0.copy_from_slice(&key[0..8]);
+ b1.copy_from_slice(&key[8..16]);
+ let key0 = u64::from_le_bytes(b0);
+ let key1 = u64::from_le_bytes(b1);
+ Self::new_with_keys(key0, key1)
+ }
+
+ /// Get the keys used by this hasher
+ pub fn keys(&self) -> (u64, u64) {
+ (self.hasher.k0, self.hasher.k1)
+ }
+
+ /// Get the key used by this hasher as a 16 byte vector
+ pub fn key(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ bytes[0..8].copy_from_slice(&self.hasher.k0.to_le_bytes());
+ bytes[8..16].copy_from_slice(&self.hasher.k1.to_le_bytes());
+ bytes
+ }
+}
+
+impl Hasher128 for SipHasher24 {
+ /// Return a 128-bit hash
+ #[inline]
+ fn finish128(&self) -> Hash128 {
+ self.hasher.finish128()
+ }
+}
+
+impl<S: Sip> Hasher<S> {
+ #[inline]
+ fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
+ let mut state = Hasher {
+ k0: key0,
+ k1: key1,
+ length: 0,
+ state: State {
+ v0: 0,
+ v1: 0xee,
+ v2: 0,
+ v3: 0,
+ },
+ tail: 0,
+ ntail: 0,
+ _marker: PhantomData,
+ };
+ state.reset();
+ state
+ }
+
+ #[inline]
+ fn reset(&mut self) {
+ self.length = 0;
+ self.state.v0 = self.k0 ^ 0x736f6d6570736575;
+ self.state.v1 = self.k1 ^ 0x646f72616e646f83;
+ self.state.v2 = self.k0 ^ 0x6c7967656e657261;
+ self.state.v3 = self.k1 ^ 0x7465646279746573;
+ self.ntail = 0;
+ }
+
+ // A specialized write function for values with size <= 8.
+ //
+ // The hashing of multi-byte integers depends on endianness. E.g.:
+ // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
+ // - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
+ //
+ // This function does the right thing for little-endian hardware. On
+ // big-endian hardware `x` must be byte-swapped first to give the right
+ // behaviour. After any byte-swapping, the input must be zero-extended to
+ // 64-bits. The caller is responsible for the byte-swapping and
+ // zero-extension.
+ #[inline]
+ fn short_write<T>(&mut self, _x: T, x: u64) {
+ let size = mem::size_of::<T>();
+ self.length += size;
+
+ // The original number must be zero-extended, not sign-extended.
+ debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
+
+ // The number of bytes needed to fill `self.tail`.
+ let needed = 8 - self.ntail;
+
+ self.tail |= x << (8 * self.ntail);
+ if size < needed {
+ self.ntail += size;
+ return;
+ }
+
+ // `self.tail` is full, process it.
+ self.state.v3 ^= self.tail;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= self.tail;
+
+ self.ntail = size - needed;
+ self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
+ }
+}
+
+impl<S: Sip> Hasher<S> {
+ #[inline]
+ pub fn finish128(&self) -> Hash128 {
+ let mut state = self.state;
+
+ let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
+
+ state.v3 ^= b;
+ S::c_rounds(&mut state);
+ state.v0 ^= b;
+
+ state.v2 ^= 0xee;
+ S::d_rounds(&mut state);
+ let h1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
+
+ state.v1 ^= 0xdd;
+ S::d_rounds(&mut state);
+ let h2 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
+
+ Hash128 { h1, h2 }
+ }
+}
+
+impl hash::Hasher for SipHasher {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.0.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0.finish()
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.0.write_usize(i);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.0.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.0.write_u16(i);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.0.write_u32(i);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.0.write_u64(i);
+ }
+}
+
+impl hash::Hasher for SipHasher13 {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.hasher.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.hasher.finish()
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.hasher.write_usize(i);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.hasher.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.hasher.write_u16(i);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.hasher.write_u32(i);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.hasher.write_u64(i);
+ }
+}
+
+impl hash::Hasher for SipHasher24 {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.hasher.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.hasher.finish()
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.hasher.write_usize(i);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.hasher.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.hasher.write_u16(i);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.hasher.write_u32(i);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.hasher.write_u64(i);
+ }
+}
+
+impl<S: Sip> hash::Hasher for Hasher<S> {
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.short_write(i, i.to_le() as u64);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.short_write(i, i as u64);
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.short_write(i, i.to_le() as u64);
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.short_write(i, i.to_le() as u64);
+ }
+
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ let length = msg.len();
+ self.length += length;
+
+ let mut needed = 0;
+
+ if self.ntail != 0 {
+ needed = 8 - self.ntail;
+ self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+ if length < needed {
+ self.ntail += length;
+ return;
+ } else {
+ self.state.v3 ^= self.tail;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= self.tail;
+ self.ntail = 0;
+ }
+ }
+
+ // Buffered tail is now flushed, process new input.
+ let len = length - needed;
+ let left = len & 0x7;
+
+ let mut i = needed;
+ while i < len - left {
+ let mi = unsafe { load_int_le!(msg, i, u64) };
+
+ self.state.v3 ^= mi;
+ S::c_rounds(&mut self.state);
+ self.state.v0 ^= mi;
+
+ i += 8;
+ }
+
+ self.tail = unsafe { u8to64_le(msg, i, left) };
+ self.ntail = left;
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.finish128().h2
+ }
+}
+
+impl<S: Sip> Clone for Hasher<S> {
+ #[inline]
+ fn clone(&self) -> Hasher<S> {
+ Hasher {
+ k0: self.k0,
+ k1: self.k1,
+ length: self.length,
+ state: self.state,
+ tail: self.tail,
+ ntail: self.ntail,
+ _marker: self._marker,
+ }
+ }
+}
+
+impl<S: Sip> Default for Hasher<S> {
+ /// Creates a `Hasher<S>` with the two initial keys set to 0.
+ #[inline]
+ fn default() -> Hasher<S> {
+ Hasher::new_with_keys(0, 0)
+ }
+}
+
+#[doc(hidden)]
+trait Sip {
+ fn c_rounds(_: &mut State);
+ fn d_rounds(_: &mut State);
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct Sip13Rounds;
+
+impl Sip for Sip13Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct Sip24Rounds;
+
+impl Sip for Sip24Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
+
+impl Hash128 {
+ /// Convert into a 16-bytes vector
+ pub fn as_bytes(&self) -> [u8; 16] {
+ let mut bytes = [0u8; 16];
+ let h1 = self.h1.to_le();
+ let h2 = self.h2.to_le();
+ unsafe {
+ ptr::copy_nonoverlapping(&h1 as *const _ as *const u8, bytes.as_mut_ptr(), 8);
+ ptr::copy_nonoverlapping(&h2 as *const _ as *const u8, bytes.as_mut_ptr().add(8), 8);
+ }
+ bytes
+ }
+
+ /// Convert into a `u128`
+ #[inline]
+ pub fn as_u128(&self) -> u128 {
+ let h1 = self.h1.to_le();
+ let h2 = self.h2.to_le();
+ h1 as u128 | ((h2 as u128) << 64)
+ }
+
+ /// Convert into `(u64, u64)`
+ #[inline]
+ pub fn as_u64(&self) -> (u64, u64) {
+ let h1 = self.h1.to_le();
+ let h2 = self.h2.to_le();
+ (h1, h2)
+ }
+}
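
sip128.rs exposes the 128-bit result through `Hasher128::finish128` and the `Hash128` accessors shown above. A hedged sketch of how those pieces fit together (illustrative, not from the vendored sources):

    use siphasher::sip128::{Hasher128, SipHasher24};
    use std::hash::Hasher;

    fn main() {
        let mut hasher = SipHasher24::new_with_key(&[0u8; 16]);
        hasher.write(b"hello world");
        let h = hasher.finish128();
        // The accessors expose the same 128-bit value in different shapes.
        let (h1, h2) = h.as_u64();
        assert_eq!(h.as_u128(), (h1 as u128) | ((h2 as u128) << 64));
        assert_eq!(h.as_bytes().len(), 16);
    }
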
diff --git a/third_party/rust/siphasher/src/tests.rs b/third_party/rust/siphasher/src/tests.rs
new file mode 100644
index 0000000000..05d4b6a18a
--- /dev/null
+++ b/third_party/rust/siphasher/src/tests.rs
@@ -0,0 +1,309 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::sip::{SipHasher, SipHasher13, SipHasher24};
+use std::hash::{Hash, Hasher};
+
+// Hash just the bytes of the slice, without length prefix
+struct Bytes<'a>(&'a [u8]);
+
+impl<'a> Hash for Bytes<'a> {
+ #[allow(unused_must_use)]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ let Bytes(v) = *self;
+ state.write(v);
+ }
+}
+
+macro_rules! u8to64_le {
+ ($buf:expr, $i:expr) => {
+ $buf[0 + $i] as u64
+ | ($buf[1 + $i] as u64) << 8
+ | ($buf[2 + $i] as u64) << 16
+ | ($buf[3 + $i] as u64) << 24
+ | ($buf[4 + $i] as u64) << 32
+ | ($buf[5 + $i] as u64) << 40
+ | ($buf[6 + $i] as u64) << 48
+ | ($buf[7 + $i] as u64) << 56
+ };
+ ($buf:expr, $i:expr, $len:expr) => {{
+ let mut t = 0;
+ let mut out = 0;
+ while t < $len {
+ out |= ($buf[t + $i] as u64) << t * 8;
+ t += 1;
+ }
+ out
+ }};
+}
+
+fn hash_with<H: Hasher, T: Hash>(mut st: H, x: &T) -> u64 {
+ x.hash(&mut st);
+ st.finish()
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ hash_with(SipHasher::new(), x)
+}
+
+#[test]
+#[allow(unused_must_use)]
+fn test_siphash_1_3() {
+ let vecs: [[u8; 8]; 64] = [
+ [0xdc, 0xc4, 0x0f, 0x05, 0x58, 0x01, 0xac, 0xab],
+ [0x93, 0xca, 0x57, 0x7d, 0xf3, 0x9b, 0xf4, 0xc9],
+ [0x4d, 0xd4, 0xc7, 0x4d, 0x02, 0x9b, 0xcb, 0x82],
+ [0xfb, 0xf7, 0xdd, 0xe7, 0xb8, 0x0a, 0xf8, 0x8b],
+ [0x28, 0x83, 0xd3, 0x88, 0x60, 0x57, 0x75, 0xcf],
+ [0x67, 0x3b, 0x53, 0x49, 0x2f, 0xd5, 0xf9, 0xde],
+ [0xa7, 0x22, 0x9f, 0xc5, 0x50, 0x2b, 0x0d, 0xc5],
+ [0x40, 0x11, 0xb1, 0x9b, 0x98, 0x7d, 0x92, 0xd3],
+ [0x8e, 0x9a, 0x29, 0x8d, 0x11, 0x95, 0x90, 0x36],
+ [0xe4, 0x3d, 0x06, 0x6c, 0xb3, 0x8e, 0xa4, 0x25],
+ [0x7f, 0x09, 0xff, 0x92, 0xee, 0x85, 0xde, 0x79],
+ [0x52, 0xc3, 0x4d, 0xf9, 0xc1, 0x18, 0xc1, 0x70],
+ [0xa2, 0xd9, 0xb4, 0x57, 0xb1, 0x84, 0xa3, 0x78],
+ [0xa7, 0xff, 0x29, 0x12, 0x0c, 0x76, 0x6f, 0x30],
+ [0x34, 0x5d, 0xf9, 0xc0, 0x11, 0xa1, 0x5a, 0x60],
+ [0x56, 0x99, 0x51, 0x2a, 0x6d, 0xd8, 0x20, 0xd3],
+ [0x66, 0x8b, 0x90, 0x7d, 0x1a, 0xdd, 0x4f, 0xcc],
+ [0x0c, 0xd8, 0xdb, 0x63, 0x90, 0x68, 0xf2, 0x9c],
+ [0x3e, 0xe6, 0x73, 0xb4, 0x9c, 0x38, 0xfc, 0x8f],
+ [0x1c, 0x7d, 0x29, 0x8d, 0xe5, 0x9d, 0x1f, 0xf2],
+ [0x40, 0xe0, 0xcc, 0xa6, 0x46, 0x2f, 0xdc, 0xc0],
+ [0x44, 0xf8, 0x45, 0x2b, 0xfe, 0xab, 0x92, 0xb9],
+ [0x2e, 0x87, 0x20, 0xa3, 0x9b, 0x7b, 0xfe, 0x7f],
+ [0x23, 0xc1, 0xe6, 0xda, 0x7f, 0x0e, 0x5a, 0x52],
+ [0x8c, 0x9c, 0x34, 0x67, 0xb2, 0xae, 0x64, 0xf4],
+ [0x79, 0x09, 0x5b, 0x70, 0x28, 0x59, 0xcd, 0x45],
+ [0xa5, 0x13, 0x99, 0xca, 0xe3, 0x35, 0x3e, 0x3a],
+ [0x35, 0x3b, 0xde, 0x4a, 0x4e, 0xc7, 0x1d, 0xa9],
+ [0x0d, 0xd0, 0x6c, 0xef, 0x02, 0xed, 0x0b, 0xfb],
+ [0xf4, 0xe1, 0xb1, 0x4a, 0xb4, 0x3c, 0xd9, 0x88],
+ [0x63, 0xe6, 0xc5, 0x43, 0xd6, 0x11, 0x0f, 0x54],
+ [0xbc, 0xd1, 0x21, 0x8c, 0x1f, 0xdd, 0x70, 0x23],
+ [0x0d, 0xb6, 0xa7, 0x16, 0x6c, 0x7b, 0x15, 0x81],
+ [0xbf, 0xf9, 0x8f, 0x7a, 0xe5, 0xb9, 0x54, 0x4d],
+ [0x3e, 0x75, 0x2a, 0x1f, 0x78, 0x12, 0x9f, 0x75],
+ [0x91, 0x6b, 0x18, 0xbf, 0xbe, 0xa3, 0xa1, 0xce],
+ [0x06, 0x62, 0xa2, 0xad, 0xd3, 0x08, 0xf5, 0x2c],
+ [0x57, 0x30, 0xc3, 0xa3, 0x2d, 0x1c, 0x10, 0xb6],
+ [0xa1, 0x36, 0x3a, 0xae, 0x96, 0x74, 0xf4, 0xb3],
+ [0x92, 0x83, 0x10, 0x7b, 0x54, 0x57, 0x6b, 0x62],
+ [0x31, 0x15, 0xe4, 0x99, 0x32, 0x36, 0xd2, 0xc1],
+ [0x44, 0xd9, 0x1a, 0x3f, 0x92, 0xc1, 0x7c, 0x66],
+ [0x25, 0x88, 0x13, 0xc8, 0xfe, 0x4f, 0x70, 0x65],
+ [0xa6, 0x49, 0x89, 0xc2, 0xd1, 0x80, 0xf2, 0x24],
+ [0x6b, 0x87, 0xf8, 0xfa, 0xed, 0x1c, 0xca, 0xc2],
+ [0x96, 0x21, 0x04, 0x9f, 0xfc, 0x4b, 0x16, 0xc2],
+ [0x23, 0xd6, 0xb1, 0x68, 0x93, 0x9c, 0x6e, 0xa1],
+ [0xfd, 0x14, 0x51, 0x8b, 0x9c, 0x16, 0xfb, 0x49],
+ [0x46, 0x4c, 0x07, 0xdf, 0xf8, 0x43, 0x31, 0x9f],
+ [0xb3, 0x86, 0xcc, 0x12, 0x24, 0xaf, 0xfd, 0xc6],
+ [0x8f, 0x09, 0x52, 0x0a, 0xd1, 0x49, 0xaf, 0x7e],
+ [0x9a, 0x2f, 0x29, 0x9d, 0x55, 0x13, 0xf3, 0x1c],
+ [0x12, 0x1f, 0xf4, 0xa2, 0xdd, 0x30, 0x4a, 0xc4],
+ [0xd0, 0x1e, 0xa7, 0x43, 0x89, 0xe9, 0xfa, 0x36],
+ [0xe6, 0xbc, 0xf0, 0x73, 0x4c, 0xb3, 0x8f, 0x31],
+ [0x80, 0xe9, 0xa7, 0x70, 0x36, 0xbf, 0x7a, 0xa2],
+ [0x75, 0x6d, 0x3c, 0x24, 0xdb, 0xc0, 0xbc, 0xb4],
+ [0x13, 0x15, 0xb7, 0xfd, 0x52, 0xd8, 0xf8, 0x23],
+ [0x08, 0x8a, 0x7d, 0xa6, 0x4d, 0x5f, 0x03, 0x8f],
+ [0x48, 0xf1, 0xe8, 0xb7, 0xe5, 0xd0, 0x9c, 0xd8],
+ [0xee, 0x44, 0xa6, 0xf7, 0xbc, 0xe6, 0xf4, 0xf6],
+ [0xf2, 0x37, 0x18, 0x0f, 0xd8, 0x9a, 0xc5, 0xae],
+ [0xe0, 0x94, 0x66, 0x4b, 0x15, 0xf6, 0xb2, 0xc3],
+ [0xa8, 0xb3, 0xbb, 0xb7, 0x62, 0x90, 0x19, 0x9d],
+ ];
+
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+ let mut buf = Vec::new();
+ let mut t = 0;
+ let mut state_inc = SipHasher13::new_with_keys(k0, k1);
+
+ while t < 64 {
+ let vec = u8to64_le!(vecs[t], 0);
+ let out = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
+ assert_eq!(vec, out);
+
+ let full = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
+ let i = state_inc.finish();
+
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
+
+ buf.push(t as u8);
+ Hasher::write(&mut state_inc, &[t as u8]);
+
+ t += 1;
+ }
+}
+
+#[test]
+#[allow(unused_must_use)]
+fn test_siphash_2_4() {
+ let vecs: [[u8; 8]; 64] = [
+ [0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72],
+ [0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74],
+ [0x5a, 0x4f, 0xa9, 0xd9, 0x09, 0x80, 0x6c, 0x0d],
+ [0x2d, 0x7e, 0xfb, 0xd7, 0x96, 0x66, 0x67, 0x85],
+ [0xb7, 0x87, 0x71, 0x27, 0xe0, 0x94, 0x27, 0xcf],
+ [0x8d, 0xa6, 0x99, 0xcd, 0x64, 0x55, 0x76, 0x18],
+ [0xce, 0xe3, 0xfe, 0x58, 0x6e, 0x46, 0xc9, 0xcb],
+ [0x37, 0xd1, 0x01, 0x8b, 0xf5, 0x00, 0x02, 0xab],
+ [0x62, 0x24, 0x93, 0x9a, 0x79, 0xf5, 0xf5, 0x93],
+ [0xb0, 0xe4, 0xa9, 0x0b, 0xdf, 0x82, 0x00, 0x9e],
+ [0xf3, 0xb9, 0xdd, 0x94, 0xc5, 0xbb, 0x5d, 0x7a],
+ [0xa7, 0xad, 0x6b, 0x22, 0x46, 0x2f, 0xb3, 0xf4],
+ [0xfb, 0xe5, 0x0e, 0x86, 0xbc, 0x8f, 0x1e, 0x75],
+ [0x90, 0x3d, 0x84, 0xc0, 0x27, 0x56, 0xea, 0x14],
+ [0xee, 0xf2, 0x7a, 0x8e, 0x90, 0xca, 0x23, 0xf7],
+ [0xe5, 0x45, 0xbe, 0x49, 0x61, 0xca, 0x29, 0xa1],
+ [0xdb, 0x9b, 0xc2, 0x57, 0x7f, 0xcc, 0x2a, 0x3f],
+ [0x94, 0x47, 0xbe, 0x2c, 0xf5, 0xe9, 0x9a, 0x69],
+ [0x9c, 0xd3, 0x8d, 0x96, 0xf0, 0xb3, 0xc1, 0x4b],
+ [0xbd, 0x61, 0x79, 0xa7, 0x1d, 0xc9, 0x6d, 0xbb],
+ [0x98, 0xee, 0xa2, 0x1a, 0xf2, 0x5c, 0xd6, 0xbe],
+ [0xc7, 0x67, 0x3b, 0x2e, 0xb0, 0xcb, 0xf2, 0xd0],
+ [0x88, 0x3e, 0xa3, 0xe3, 0x95, 0x67, 0x53, 0x93],
+ [0xc8, 0xce, 0x5c, 0xcd, 0x8c, 0x03, 0x0c, 0xa8],
+ [0x94, 0xaf, 0x49, 0xf6, 0xc6, 0x50, 0xad, 0xb8],
+ [0xea, 0xb8, 0x85, 0x8a, 0xde, 0x92, 0xe1, 0xbc],
+ [0xf3, 0x15, 0xbb, 0x5b, 0xb8, 0x35, 0xd8, 0x17],
+ [0xad, 0xcf, 0x6b, 0x07, 0x63, 0x61, 0x2e, 0x2f],
+ [0xa5, 0xc9, 0x1d, 0xa7, 0xac, 0xaa, 0x4d, 0xde],
+ [0x71, 0x65, 0x95, 0x87, 0x66, 0x50, 0xa2, 0xa6],
+ [0x28, 0xef, 0x49, 0x5c, 0x53, 0xa3, 0x87, 0xad],
+ [0x42, 0xc3, 0x41, 0xd8, 0xfa, 0x92, 0xd8, 0x32],
+ [0xce, 0x7c, 0xf2, 0x72, 0x2f, 0x51, 0x27, 0x71],
+ [0xe3, 0x78, 0x59, 0xf9, 0x46, 0x23, 0xf3, 0xa7],
+ [0x38, 0x12, 0x05, 0xbb, 0x1a, 0xb0, 0xe0, 0x12],
+ [0xae, 0x97, 0xa1, 0x0f, 0xd4, 0x34, 0xe0, 0x15],
+ [0xb4, 0xa3, 0x15, 0x08, 0xbe, 0xff, 0x4d, 0x31],
+ [0x81, 0x39, 0x62, 0x29, 0xf0, 0x90, 0x79, 0x02],
+ [0x4d, 0x0c, 0xf4, 0x9e, 0xe5, 0xd4, 0xdc, 0xca],
+ [0x5c, 0x73, 0x33, 0x6a, 0x76, 0xd8, 0xbf, 0x9a],
+ [0xd0, 0xa7, 0x04, 0x53, 0x6b, 0xa9, 0x3e, 0x0e],
+ [0x92, 0x59, 0x58, 0xfc, 0xd6, 0x42, 0x0c, 0xad],
+ [0xa9, 0x15, 0xc2, 0x9b, 0xc8, 0x06, 0x73, 0x18],
+ [0x95, 0x2b, 0x79, 0xf3, 0xbc, 0x0a, 0xa6, 0xd4],
+ [0xf2, 0x1d, 0xf2, 0xe4, 0x1d, 0x45, 0x35, 0xf9],
+ [0x87, 0x57, 0x75, 0x19, 0x04, 0x8f, 0x53, 0xa9],
+ [0x10, 0xa5, 0x6c, 0xf5, 0xdf, 0xcd, 0x9a, 0xdb],
+ [0xeb, 0x75, 0x09, 0x5c, 0xcd, 0x98, 0x6c, 0xd0],
+ [0x51, 0xa9, 0xcb, 0x9e, 0xcb, 0xa3, 0x12, 0xe6],
+ [0x96, 0xaf, 0xad, 0xfc, 0x2c, 0xe6, 0x66, 0xc7],
+ [0x72, 0xfe, 0x52, 0x97, 0x5a, 0x43, 0x64, 0xee],
+ [0x5a, 0x16, 0x45, 0xb2, 0x76, 0xd5, 0x92, 0xa1],
+ [0xb2, 0x74, 0xcb, 0x8e, 0xbf, 0x87, 0x87, 0x0a],
+ [0x6f, 0x9b, 0xb4, 0x20, 0x3d, 0xe7, 0xb3, 0x81],
+ [0xea, 0xec, 0xb2, 0xa3, 0x0b, 0x22, 0xa8, 0x7f],
+ [0x99, 0x24, 0xa4, 0x3c, 0xc1, 0x31, 0x57, 0x24],
+ [0xbd, 0x83, 0x8d, 0x3a, 0xaf, 0xbf, 0x8d, 0xb7],
+ [0x0b, 0x1a, 0x2a, 0x32, 0x65, 0xd5, 0x1a, 0xea],
+ [0x13, 0x50, 0x79, 0xa3, 0x23, 0x1c, 0xe6, 0x60],
+ [0x93, 0x2b, 0x28, 0x46, 0xe4, 0xd7, 0x06, 0x66],
+ [0xe1, 0x91, 0x5f, 0x5c, 0xb1, 0xec, 0xa4, 0x6c],
+ [0xf3, 0x25, 0x96, 0x5c, 0xa1, 0x6d, 0x62, 0x9f],
+ [0x57, 0x5f, 0xf2, 0x8e, 0x60, 0x38, 0x1b, 0xe5],
+ [0x72, 0x45, 0x06, 0xeb, 0x4c, 0x32, 0x8a, 0x95],
+ ];
+
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+ let mut buf = Vec::new();
+ let mut t = 0;
+ let mut state_inc = SipHasher24::new_with_keys(k0, k1);
+
+ while t < 64 {
+ let vec = u8to64_le!(vecs[t], 0);
+ let out = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf));
+ assert_eq!(vec, out);
+
+ let full = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf));
+ let i = state_inc.finish();
+
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
+
+ buf.push(t as u8);
+ Hasher::write(&mut state_inc, &[t as u8]);
+
+ t += 1;
+ }
+}
+#[test]
+fn test_hash_idempotent() {
+ let val64 = 0xdead_beef_dead_beef_u64;
+ assert_eq!(hash(&val64), hash(&val64));
+ let val32 = 0xdeadbeef_u32;
+ assert_eq!(hash(&val32), hash(&val32));
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_64() {
+ let val = 0xdead_beef_dead_beef_u64;
+
+ assert_ne!(hash(&val), hash(&zero_byte(val, 0)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 1)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 2)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 3)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 4)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 5)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 6)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 7)));
+
+ fn zero_byte(val: u64, byte: usize) -> u64 {
+ assert!(byte < 8);
+ val & !(0xff << (byte * 8))
+ }
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_32() {
+ let val = 0xdeadbeef_u32;
+
+ assert_ne!(hash(&val), hash(&zero_byte(val, 0)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 1)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 2)));
+ assert_ne!(hash(&val), hash(&zero_byte(val, 3)));
+
+ fn zero_byte(val: u32, byte: usize) -> u32 {
+ assert!(byte < 4);
+ val & !(0xff << (byte * 8))
+ }
+}
+
+#[test]
+fn test_hash_no_concat_alias() {
+ let s = ("aa", "bb");
+ let t = ("aabb", "");
+ let u = ("a", "abb");
+
+ assert!(s != t && t != u);
+ assert!(hash(&s) != hash(&t) && hash(&s) != hash(&u));
+
+ let u = [1, 0, 0, 0];
+ let v = (&u[..1], &u[1..3], &u[3..]);
+ let w = (&u[..], &u[4..4], &u[4..4]);
+
+ assert_ne!(v, w);
+ assert_ne!(hash(&v), hash(&w));
+}
+
+#[test]
+fn test_hash_serde() {
+ let val64 = 0xdead_beef_dead_beef_u64;
+ let hash = hash(&val64);
+ let serialized = serde_json::to_string(&hash).unwrap();
+ let deserialized: u64 = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(hash, deserialized);
+}
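
test_hash_serde above serializes the resulting u64, but the `serde` feature also derives `Serialize`/`Deserialize` on the hashers themselves (see the `cfg_attr` attributes in sip.rs and sip128.rs). A hedged sketch of round-tripping in-progress hasher state, assuming the crate is built with that feature and serde_json is available:

    use siphasher::sip::SipHasher13;
    use std::hash::Hasher;

    fn main() {
        let mut original = SipHasher13::new_with_keys(1, 2);
        original.write(b"partial input");
        // Snapshot the in-progress state, restore it, and keep feeding both copies.
        let json = serde_json::to_string(&original).unwrap();
        let mut restored: SipHasher13 = serde_json::from_str(&json).unwrap();
        original.write(b" and the rest");
        restored.write(b" and the rest");
        assert_eq!(original.finish(), restored.finish());
    }
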
diff --git a/third_party/rust/siphasher/src/tests128.rs b/third_party/rust/siphasher/src/tests128.rs
new file mode 100644
index 0000000000..0d9769646c
--- /dev/null
+++ b/third_party/rust/siphasher/src/tests128.rs
@@ -0,0 +1,103 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::sip128::{Hasher128, SipHasher, SipHasher13, SipHasher24};
+use std::hash::{Hash, Hasher};
+
+// Hash just the bytes of the slice, without length prefix
+struct Bytes<'a>(&'a [u8]);
+
+impl<'a> Hash for Bytes<'a> {
+ #[allow(unused_must_use)]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ let Bytes(v) = *self;
+ state.write(v);
+ }
+}
+
+fn hash_with<H: Hasher + Hasher128, T: Hash>(mut st: H, x: &T) -> [u8; 16] {
+ x.hash(&mut st);
+ st.finish128().as_bytes()
+}
+
+fn hash<T: Hash>(x: &T) -> [u8; 16] {
+ hash_with(SipHasher::new(), x)
+}
+
+#[test]
+#[allow(unused_must_use)]
+fn test_siphash128_1_3() {
+ let vecs: [[u8; 16]; 1] = [[
+ 231, 126, 188, 178, 39, 136, 165, 190, 253, 98, 219, 106, 221, 48, 48, 1,
+ ]];
+
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+ let mut buf = Vec::new();
+ let mut t = 0;
+ let mut state_inc = SipHasher13::new_with_keys(k0, k1);
+
+ while t < 1 {
+ let vec = vecs[t];
+ let out = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
+ assert_eq!(vec, out[..]);
+
+ let full = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
+ let i = state_inc.finish128().as_bytes();
+
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
+
+ buf.push(t as u8);
+ Hasher::write(&mut state_inc, &[t as u8]);
+
+ t += 1;
+ }
+}
+
+#[test]
+#[allow(unused_must_use)]
+fn test_siphash128_2_4() {
+ let vecs: [[u8; 16]; 1] = [[
+ 163, 129, 127, 4, 186, 37, 168, 230, 109, 246, 114, 20, 199, 85, 2, 147,
+ ]];
+
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+ let mut buf = Vec::new();
+ let mut t = 0;
+ let mut state_inc = SipHasher24::new_with_keys(k0, k1);
+
+ while t < 1 {
+ let vec = vecs[t];
+ let out = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf));
+ assert_eq!(vec, out[..]);
+
+ let full = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf));
+ let i = state_inc.finish128().as_bytes();
+
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
+
+ buf.push(t as u8);
+ Hasher::write(&mut state_inc, &[t as u8]);
+
+ t += 1;
+ }
+}
+
+#[test]
+fn test_siphash128_serde() {
+ let val64 = 0xdead_beef_dead_beef_u64;
+ let hash = hash(&val64);
+ let serialized = serde_json::to_string(&hash).unwrap();
+ let deserialized: [u8; 16] = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(hash, deserialized);
+}