//! This is a copy of the `rustc_hash` crate, adapted to work as a module. //! //! If in the future it becomes more reasonable to add dependencies to //! `proc_macro`, this module should be removed and replaced with a dependency //! on the `rustc_hash` crate. use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; use std::hash::BuildHasherDefault; use std::hash::Hasher; use std::mem::size_of; use std::ops::BitXor; /// Type alias for a hashmap using the `fx` hash algorithm. pub type FxHashMap = HashMap>; /// A speedy hash algorithm for use within rustc. The hashmap in alloc by /// default uses SipHash which isn't quite as speedy as we want. In the compiler /// we're not really worried about DOS attempts, so we use a fast /// non-cryptographic hash. /// /// This is the same as the algorithm used by Firefox -- which is a homespun /// one not based on any widely-known algorithm -- though modified to produce /// 64-bit hash values instead of 32-bit hash values. It consistently /// out-performs an FNV-based hash within rustc itself -- the collision rate is /// similar or slightly worse than FNV, but the speed of the hash function /// itself is much higher because it works on up to 8 bytes at a time. 
pub struct FxHasher {
    // Running hash state; one machine word so the mixing step works on
    // the platform's native width.
    hash: usize,
}

// Multiplicative mixing constant (derived from the golden ratio), sized to
// the target's pointer width so the whole state word participates.
#[cfg(target_pointer_width = "32")]
const K: usize = 0x9e3779b9;
#[cfg(target_pointer_width = "64")]
const K: usize = 0x517cc1b727220a95;

impl Default for FxHasher {
    #[inline]
    fn default() -> FxHasher {
        FxHasher { hash: 0 }
    }
}

impl FxHasher {
    /// Folds one machine word into the hash state:
    /// `hash = (rotl(hash, 5) ^ i) * K` with wrapping multiplication.
    #[inline]
    fn add_to_hash(&mut self, i: usize) {
        self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K);
    }
}

impl Hasher for FxHasher {
    #[inline]
    fn write(&mut self, mut bytes: &[u8]) {
        // Reads one native-endian machine word from the front of `bytes`;
        // callers must guarantee at least `size_of::<usize>()` bytes remain.
        #[cfg(target_pointer_width = "32")]
        let read_usize = |bytes: &[u8]| u32::from_ne_bytes(bytes[..4].try_into().unwrap());
        #[cfg(target_pointer_width = "64")]
        let read_usize = |bytes: &[u8]| u64::from_ne_bytes(bytes[..8].try_into().unwrap());

        // Work on a local copy so the hot loop updates a register-friendly
        // temporary instead of going through `&mut self` each iteration.
        let mut hash = FxHasher { hash: self.hash };
        assert!(size_of::<usize>() <= 8);
        // Consume the input a full word at a time...
        while bytes.len() >= size_of::<usize>() {
            hash.add_to_hash(read_usize(bytes) as usize);
            bytes = &bytes[size_of::<usize>()..];
        }
        // ...then fold in the remaining tail in 4-, 2-, and 1-byte steps.
        // The `size_of` guards are compile-time constants, so impossible
        // branches are eliminated on 32-bit targets.
        if (size_of::<usize>() > 4) && (bytes.len() >= 4) {
            hash.add_to_hash(u32::from_ne_bytes(bytes[..4].try_into().unwrap()) as usize);
            bytes = &bytes[4..];
        }
        if (size_of::<usize>() > 2) && bytes.len() >= 2 {
            hash.add_to_hash(u16::from_ne_bytes(bytes[..2].try_into().unwrap()) as usize);
            bytes = &bytes[2..];
        }
        if (size_of::<usize>() > 1) && !bytes.is_empty() {
            hash.add_to_hash(bytes[0] as usize);
        }
        self.hash = hash.hash;
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.add_to_hash(i as usize);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.add_to_hash(i as usize);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.add_to_hash(i as usize);
    }

    // On 32-bit targets a u64 must be folded in as two words.
    #[cfg(target_pointer_width = "32")]
    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.add_to_hash(i as usize);
        self.add_to_hash((i >> 32) as usize);
    }

    #[cfg(target_pointer_width = "64")]
    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.add_to_hash(i as usize);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.add_to_hash(i);
    }

    #[inline]
    fn finish(&self) -> u64 {
        self.hash as u64
    }
}