// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use self::Entry::*;
use self::VacantEntryState::*;

use std::borrow::Borrow;
use std::cmp::max;
use std::fmt::{self, Debug};
#[allow(deprecated)]
use std::hash::{BuildHasher, Hash};
use std::iter::FromIterator;
use std::mem::{self, replace};
use std::ops::{Deref, Index};

use super::table::BucketState::{Empty, Full};
use super::table::{self, Bucket, EmptyBucket, FullBucket, FullBucketMut, RawTable, SafeHash};

use crate::FailedAllocationError;

const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two

/// The default behavior of HashMap implements a maximum load factor of 90.9%.
#[derive(Clone)]
struct DefaultResizePolicy;

impl DefaultResizePolicy {
    fn new() -> DefaultResizePolicy {
        DefaultResizePolicy
    }

    /// A hash map's "capacity" is the number of elements it can hold without
    /// being resized. Its "raw capacity" is the number of slots required to
    /// provide that capacity, accounting for maximum loading. The raw capacity
    /// is always zero or a power of two.
    #[inline]
    fn raw_capacity(&self, len: usize) -> usize {
        if len == 0 {
            0
        } else {
            // 1. Account for loading: `raw_capacity >= len * 1.1`.
            // 2. Ensure it is a power of two.
            // 3. Ensure it is at least the minimum size.
            let mut raw_cap = len * 11 / 10;
            assert!(raw_cap >= len, "raw_cap overflow");

            raw_cap = raw_cap
                .checked_next_power_of_two()
                .expect("raw_capacity overflow");
            raw_cap = max(MIN_NONZERO_RAW_CAPACITY, raw_cap);
            raw_cap
        }
    }

    /// The capacity of the given raw capacity.
    #[inline]
    fn capacity(&self, raw_cap: usize) -> usize {
        // This doesn't have to be checked for overflow since allocation size
        // in bytes will overflow earlier than multiplication by 10.
        //
        // As per https://github.com/rust-lang/rust/pull/30991 this is updated
        // to be: (raw_cap * den + den - 1) / num
        (raw_cap * 10 + 10 - 1) / 11
    }
}

// The main performance trick in this hashmap is called Robin Hood Hashing.
// It gains its excellent performance from one essential operation:
//
// If an insertion collides with an existing element, and that element's
// "probe distance" (how far away the element is from its ideal location)
// is higher than how far we've already probed, swap the elements.
//
// This massively lowers variance in probe distance, and allows us to get very
// high load factors with good performance. The 90% load factor I use is rather
// conservative.
//
// > Why a load factor of approximately 90%?
//
// In general, all the distances to initial buckets will converge on the mean.
// At a load factor of α, the odds of finding the target bucket after k
// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
// this down to make the math easier on the CPU and avoid its FPU.
// Since on average we start the probing in the middle of a cache line, this
// strategy pulls in two cache lines of hashes on every lookup. I think that's
// pretty good, but if you want to trade off some space, it could go down to one
// cache line on average with an α of 0.84.
//
// > Wait, what? Where did you get 1-α^k from?
//
// On the first probe, your odds of a collision with an existing element is α.
// The odds of doing this twice in a row is approximately α^2. For three times,
// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
// colliding after k tries is 1-α^k.
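//
// As a quick sanity check of that approximation (the numbers here are only
// illustrative): with the actual load factor α = 10/11 ≈ 0.909 and k = 8,
// α^8 ≈ 0.47, so slightly more than half of all lookups are expected to
// resolve within the first eight probed hashes.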
//
// The paper from 1986 cited below mentions an implementation which keeps track
// of the distance-to-initial-bucket histogram. This approach is not suitable
// for modern architectures because it requires maintaining an internal data
// structure. This allows very good first guesses, but we are most concerned
// with guessing entire cache lines, not individual indexes. Furthermore, array
// accesses are no longer linear and in one direction, as we have now. There
// is also memory and cache pressure that this would entail that would be very
// difficult to properly see in a microbenchmark.
//
// ## Future Improvements (FIXME!)
//
// Allow the load factor to be changed dynamically and/or at initialization.
//
// Also, would it be possible for us to reuse storage when growing the
// underlying table? This is exactly the use case for 'realloc', and may
// be worth exploring.
//
// ## Future Optimizations (FIXME!)
//
// Another possible design choice that I made without any real reason is
// parameterizing the raw table over keys and values. Technically, all we need
// is the size and alignment of keys and values, and the code should be just as
// efficient (well, we might need one for power-of-two size and one for not...).
// This has the potential to reduce code bloat in rust executables, without
// really losing anything except 4 words (key size, key alignment, val size,
// val alignment) which can be passed in to every call of a `RawTable` function.
// This would definitely be an avenue worth exploring if people start complaining
// about the size of rust executables.
//
// Annotate exceedingly likely branches in `table::make_hash`
// and `search_hashed` to reduce instruction cache pressure
// and mispredictions once it becomes possible (blocked on issue #11092).
//
// Shrinking the table could simply reallocate in place after moving buckets
// to the first half.
//
// The growth algorithm (fragment of the Proof of Correctness)
// --------------------
//
// The growth algorithm is basically a fast path of the naive reinsertion-
// during-resize algorithm. Other paths should never be taken.
//
// Consider growing a robin hood hashtable of capacity n. Normally, we do this
// by allocating a new table of capacity `2n`, and then individually reinsert
// each element in the old table into the new one. This guarantees that the
// new table is a valid robin hood hashtable with all the desired statistical
// properties. Remark that the order we reinsert the elements in should not
// matter. For simplicity and efficiency, we will consider only linear
// reinsertions, which consist of reinserting all elements in the old table
// into the new one by increasing order of index. However, we will not be
// starting our reinsertions from index 0 in general. If we start from index
// i, for the purpose of reinsertion we will consider all elements with real
// index j < i to have virtual index n + j.
//
// Our hash generation scheme consists of generating a 64-bit hash and
// truncating the most significant bits. When moving to the new table, we
// simply introduce a new bit to the front of the hash. Therefore, if an
// element has ideal index i in the old table, it can have one of two ideal
// locations in the new table. If the new bit is 0, then the new ideal index
// is i. If the new bit is 1, then the new ideal index is n + i.
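//
// A concrete illustration (the numbers are chosen only as an example): with
// old capacity n = 8, an element whose ideal index was 5 keeps ideal index 5
// in the new table of capacity 16 if its newly exposed hash bit is 0, and
// gets ideal index 8 + 5 = 13 if that bit is 1.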
// Intuitively, we are producing two independent tables of size n, and for each
// element we independently choose which table to insert it into with equal
// probability. However, rather than wrapping around themselves on overflowing
// their indexes, the first table overflows into the second, and the second
// into the first. Visually, our new table will look something like:
//
// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
//
// Where x's are elements inserted into the first table, y's are elements
// inserted into the second, and _'s are empty sections. We now define a few
// key concepts that we will use later. Note that this is a very abstract
// perspective of the table. A real resized table would be at least half
// empty.
//
// Theorem: A linear robin hood reinsertion from the first ideal element
// produces identical results to a linear naive reinsertion from the same
// element.
//
// FIXME(Gankro, pczarn): review the proof and put it all in a separate README.md
//
// Adaptive early resizing
// ----------------------
// To protect against degenerate performance scenarios (including DOS attacks),
// the implementation includes an adaptive behavior that can resize the map
// early (before its capacity is exceeded) when suspiciously long probe
// sequences are encountered.
//
// With this algorithm in place it would be possible to turn a CPU attack into
// a memory attack due to the aggressive resizing. To prevent that, the
// adaptive behavior only triggers when the map is at least half full.
// This reduces the effectiveness of the algorithm but also makes it
// completely safe.
//
// The previous safety measure also prevents degenerate interactions with
// really bad quality hash algorithms that can make normal inputs look like a
// DOS attack.
//
const DISPLACEMENT_THRESHOLD: usize = 128;
//
// The threshold of 128 is chosen to minimize the chance of exceeding it.
// In particular, we want that chance to be less than 10^-8 with a load of 90%.
// For displacement, the smallest constant that fits our needs is 90,
// so we round that up to 128.
//
// At a load factor of α, the odds of finding the target bucket after exactly n
// unsuccessful probes[1] are
//
// Pr_α{displacement = n} =
// (1 - α) / α * ∑_{k≥1} e^(-kα) * (kα)^(k+n) / (k + n)! * (1 - kα / (k + n + 1))
//
// We use this formula to find the probability of triggering the adaptive
// behavior:
//
// Pr_0.909{displacement > 128} = 1.601 * 10^-11
//
// 1. Alfredo Viola (2005). Distributional analysis of Robin Hood linear probing
//    hashing with buckets.

/// A hash map implemented with linear probing and Robin Hood bucket stealing.
///
/// By default, `HashMap` uses a hashing algorithm selected to provide
/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
/// reasonable best-effort is made to generate this seed from a high quality,
/// secure source of randomness provided by the host without blocking the
/// program. Because of this, the randomness of the seed depends on the output
/// quality of the system's random number generator when the seed is created.
/// In particular, seeds generated when the system's entropy pool is abnormally
/// low such as during system boot may be of a lower quality.
///
/// The default hashing algorithm is currently SipHash 1-3, though this is
/// subject to change at any point in the future.
While its performance is very /// competitive for medium sized keys, other hashing algorithms will outperform /// it for small keys such as integers as well as large keys such as long /// strings, though those algorithms will typically *not* protect against /// attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`HashMap` basis using the /// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many /// alternative algorithms are available on crates.io, such as the [`fnv`] crate. /// /// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although /// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. /// If you implement these yourself, it is important that the following /// property holds: /// /// ```text /// k1 == k2 -> hash(k1) == hash(k2) /// ``` /// /// In other words, if two keys are equal, their hashes must be equal. /// /// It is a logic error for a key to be modified in such a way that the key's /// hash, as determined by the [`Hash`] trait, or its equality, as determined by /// the [`Eq`] trait, changes while it is in the map. This is normally only /// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. /// /// Relevant papers/articles: /// /// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf) /// 2. Emmanuel Goossaert. ["Robin Hood /// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/) /// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift /// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/) /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// // type inference lets us omit an explicit type signature (which /// // would be `HashMap<&str, &str>` in this example). /// let mut book_reviews = HashMap::new(); /// /// // review some books. /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book."); /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece."); /// book_reviews.insert("Pride and Prejudice", "Very enjoyable."); /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot."); /// /// // check for a specific one. /// if !book_reviews.contains_key("Les Misérables") { /// println!("We've got {} reviews, but Les Misérables ain't one.", /// book_reviews.len()); /// } /// /// // oops, this review has a lot of spelling mistakes, let's delete it. /// book_reviews.remove("The Adventures of Sherlock Holmes"); /// /// // look up the values associated with some keys. /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; /// for book in &to_find { /// match book_reviews.get(book) { /// Some(review) => println!("{}: {}", book, review), /// None => println!("{} is unreviewed.", book) /// } /// } /// /// // iterate over everything. /// for (book, review) in &book_reviews { /// println!("{}: \"{}\"", book, review); /// } /// ``` /// /// `HashMap` also implements an [`Entry API`](#method.entry), which allows /// for more complex methods of getting, setting, updating and removing keys and /// their values: /// /// ``` /// use std::collections::HashMap; /// /// // type inference lets us omit an explicit type signature (which /// // would be `HashMap<&str, u8>` in this example). 
/// let mut player_stats = HashMap::new(); /// /// fn random_stat_buff() -> u8 { /// // could actually return some random value here - let's just return /// // some fixed value for now /// 42 /// } /// /// // insert a key only if it doesn't already exist /// player_stats.entry("health").or_insert(100); /// /// // insert a key using a function that provides a new value only if it /// // doesn't already exist /// player_stats.entry("defence").or_insert_with(random_stat_buff); /// /// // update a key, guarding against the key possibly not being set /// let stat = player_stats.entry("attack").or_insert(100); /// *stat += random_stat_buff(); /// ``` /// /// The easiest way to use `HashMap` with a custom type as key is to derive [`Eq`] and [`Hash`]. /// We must also derive [`PartialEq`]. /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html /// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html /// [`RefCell`]: ../../std/cell/struct.RefCell.html /// [`Cell`]: ../../std/cell/struct.Cell.html /// [`default`]: #method.default /// [`with_hasher`]: #method.with_hasher /// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher /// [`fnv`]: https://crates.io/crates/fnv /// /// ``` /// use std::collections::HashMap; /// /// #[derive(Hash, Eq, PartialEq, Debug)] /// struct Viking { /// name: String, /// country: String, /// } /// /// impl Viking { /// /// Create a new Viking. /// fn new(name: &str, country: &str) -> Viking { /// Viking { name: name.to_string(), country: country.to_string() } /// } /// } /// /// // Use a HashMap to store the vikings' health points. /// let mut vikings = HashMap::new(); /// /// vikings.insert(Viking::new("Einar", "Norway"), 25); /// vikings.insert(Viking::new("Olaf", "Denmark"), 24); /// vikings.insert(Viking::new("Harald", "Iceland"), 12); /// /// // Use derived implementation to print the status of the vikings. /// for (viking, health) in &vikings { /// println!("{:?} has {} hp", viking, health); /// } /// ``` /// /// A `HashMap` with fixed list of elements can be initialized from an array: /// /// ``` /// use std::collections::HashMap; /// /// fn main() { /// let timber_resources: HashMap<&str, i32> = /// [("Norway", 100), /// ("Denmark", 50), /// ("Iceland", 10)] /// .iter().cloned().collect(); /// // use the values stored in map /// } /// ``` #[derive(Clone)] pub struct HashMap { // All hashes are keyed on these values, to prevent hash collision attacks. hash_builder: S, table: RawTable, resize_policy: DefaultResizePolicy, } /// Search for a pre-hashed key. #[inline] fn search_hashed(table: M, hash: SafeHash, mut is_match: F) -> InternalEntry where M: Deref>, F: FnMut(&K) -> bool, { // This is the only function where capacity can be zero. To avoid // undefined behavior when Bucket::new gets the raw bucket in this // case, immediately return the appropriate search result. if table.capacity() == 0 { return InternalEntry::TableIsEmpty; } let size = table.size(); let mut probe = Bucket::new(table, hash); let mut displacement = 0; loop { let full = match probe.peek() { Empty(bucket) => { // Found a hole! return InternalEntry::Vacant { hash, elem: NoElem(bucket, displacement), }; }, Full(bucket) => bucket, }; let probe_displacement = full.displacement(); if probe_displacement < displacement { // Found a luckier bucket than me. // We can finish the search early if we hit any bucket // with a lower distance to initial bucket than we've probed. 
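            // (Had our key been inserted, Robin Hood stealing would have
            // displaced this less-displaced element before letting our key
            // probe past it, so the key cannot be stored at or beyond this
            // bucket. Record the slot an insertion would steal instead.)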
return InternalEntry::Vacant { hash, elem: NeqElem(full, probe_displacement), }; } // If the hash doesn't match, it can't be this one.. if hash == full.hash() { // If the key doesn't match, it can't be this one.. if is_match(full.read().0) { return InternalEntry::Occupied { elem: full }; } } displacement += 1; probe = full.next(); debug_assert!(displacement <= size); } } fn pop_internal(starting_bucket: FullBucketMut) -> (K, V, &mut RawTable) { let (empty, retkey, retval) = starting_bucket.take(); let mut gap = match empty.gap_peek() { Ok(b) => b, Err(b) => return (retkey, retval, b.into_table()), }; while gap.full().displacement() != 0 { gap = match gap.shift() { Ok(b) => b, Err(b) => { return (retkey, retval, b.into_table()); }, }; } // Now we've done all our shifting. Return the value we grabbed earlier. (retkey, retval, gap.into_table()) } /// Perform robin hood bucket stealing at the given `bucket`. You must /// also pass that bucket's displacement so we don't have to recalculate it. /// /// `hash`, `key`, and `val` are the elements to "robin hood" into the hashtable. fn robin_hood<'a, K: 'a, V: 'a>( bucket: FullBucketMut<'a, K, V>, mut displacement: usize, mut hash: SafeHash, mut key: K, mut val: V, ) -> FullBucketMut<'a, K, V> { let size = bucket.table().size(); let raw_capacity = bucket.table().capacity(); // There can be at most `size - dib` buckets to displace, because // in the worst case, there are `size` elements and we already are // `displacement` buckets away from the initial one. let idx_end = (bucket.index() + size - bucket.displacement()) % raw_capacity; // Save the *starting point*. let mut bucket = bucket.stash(); loop { let (old_hash, old_key, old_val) = bucket.replace(hash, key, val); hash = old_hash; key = old_key; val = old_val; loop { displacement += 1; let probe = bucket.next(); debug_assert_ne!(probe.index(), idx_end); let full_bucket = match probe.peek() { Empty(bucket) => { // Found a hole! let bucket = bucket.put(hash, key, val); // Now that it's stolen, just read the value's pointer // right out of the table! Go back to the *starting point*. // // This use of `into_table` is misleading. It turns the // bucket, which is a FullBucket on top of a // FullBucketMut, into just one FullBucketMut. The "table" // refers to the inner FullBucketMut in this context. return bucket.into_table(); }, Full(bucket) => bucket, }; let probe_displacement = full_bucket.displacement(); bucket = full_bucket; // Robin hood! Steal the spot. if probe_displacement < displacement { displacement = probe_displacement; break; } } } } impl HashMap where K: Eq + Hash, S: BuildHasher, { fn make_hash(&self, x: &X) -> SafeHash where X: Hash, { table::make_hash(&self.hash_builder, x) } /// Search for a key, yielding the index if it's found in the hashtable. /// If you already have the hash for the key lying around, use /// search_hashed. #[inline] fn search<'a, Q: ?Sized>(&'a self, q: &Q) -> InternalEntry> where K: Borrow, Q: Eq + Hash, { let hash = self.make_hash(q); search_hashed(&self.table, hash, |k| q.eq(k.borrow())) } #[inline] fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q) -> InternalEntry> where K: Borrow, Q: Eq + Hash, { let hash = self.make_hash(q); search_hashed(&mut self.table, hash, |k| q.eq(k.borrow())) } // The caller should ensure that invariants by Robin Hood Hashing hold // and that there's space in the underlying table. 
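    // This is used while resizing (see `try_resize`): elements are reinserted
    // in an order that already satisfies the Robin Hood invariant, so no
    // displacement tracking or bucket stealing is needed here; the first empty
    // bucket found is the element's final position.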
fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) { let mut buckets = Bucket::new(&mut self.table, hash); let start_index = buckets.index(); loop { // We don't need to compare hashes for value swap. // Not even DIBs for Robin Hood. buckets = match buckets.peek() { Empty(empty) => { empty.put(hash, k, v); return; }, Full(b) => b.into_bucket(), }; buckets.next(); debug_assert_ne!(buckets.index(), start_index); } } } impl HashMap where K: Eq + Hash, S: BuildHasher, { /// Creates an empty `HashMap` which will use the given hash builder to hash /// keys. /// /// The created map has the default initial capacity. /// /// Warning: `hash_builder` is normally randomly generated, and /// is designed to allow HashMaps to be resistant to attacks that /// cause many collisions and very poor performance. Setting it /// manually using this function can expose a DoS attack vector. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::RandomState; /// /// let s = RandomState::new(); /// let mut map = HashMap::with_hasher(s); /// map.insert(1, 2); /// ``` #[inline] pub fn try_with_hasher(hash_builder: S) -> Result, FailedAllocationError> { Ok(HashMap { hash_builder, resize_policy: DefaultResizePolicy::new(), table: RawTable::new(0)?, }) } #[inline] pub fn with_hasher(hash_builder: S) -> HashMap { Self::try_with_hasher(hash_builder).unwrap() } /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` /// to hash the keys. /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. /// /// Warning: `hash_builder` is normally randomly generated, and /// is designed to allow HashMaps to be resistant to attacks that /// cause many collisions and very poor performance. Setting it /// manually using this function can expose a DoS attack vector. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::RandomState; /// /// let s = RandomState::new(); /// let mut map = HashMap::with_capacity_and_hasher(10, s); /// map.insert(1, 2); /// ``` #[inline] pub fn try_with_capacity_and_hasher( capacity: usize, hash_builder: S, ) -> Result, FailedAllocationError> { let resize_policy = DefaultResizePolicy::new(); let raw_cap = resize_policy.raw_capacity(capacity); Ok(HashMap { hash_builder, resize_policy, table: RawTable::new(raw_cap)?, }) } pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap { Self::try_with_capacity_and_hasher(capacity, hash_builder).unwrap() } /// Returns a reference to the map's [`BuildHasher`]. /// /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html pub fn hasher(&self) -> &S { &self.hash_builder } /// Returns the number of elements the map can hold without reallocating. /// /// This number is a lower bound; the `HashMap` might be able to hold /// more, but is guaranteed to be able to hold at least this many. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// let map: HashMap = HashMap::with_capacity(100); /// assert!(map.capacity() >= 100); /// ``` #[inline] pub fn capacity(&self) -> usize { self.resize_policy.capacity(self.raw_capacity()) } /// Returns the hash map's raw capacity. #[inline] fn raw_capacity(&self) -> usize { self.table.capacity() } /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashMap`. The collection may reserve more space to avoid /// frequent reallocations. 
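    ///
    /// This map also exposes `try_reserve`, which reports allocation failure
    /// as a `FailedAllocationError` instead of panicking.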
/// /// # Panics /// /// Panics if the new allocation size overflows [`usize`]. /// /// [`usize`]: ../../std/primitive.usize.html /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// let mut map: HashMap<&str, isize> = HashMap::new(); /// map.reserve(10); /// ``` pub fn reserve(&mut self, additional: usize) { self.try_reserve(additional).unwrap(); } #[inline] pub fn try_reserve(&mut self, additional: usize) -> Result<(), FailedAllocationError> { let remaining = self.capacity() - self.len(); // this can't overflow if remaining < additional { let min_cap = self .len() .checked_add(additional) .expect("reserve overflow"); let raw_cap = self.resize_policy.raw_capacity(min_cap); self.try_resize(raw_cap)?; } else if self.table.tag() && remaining <= self.len() { // Probe sequence is too long and table is half full, // resize early to reduce probing length. let new_capacity = self.table.capacity() * 2; self.try_resize(new_capacity)?; } Ok(()) } #[cold] #[inline(never)] fn try_resize(&mut self, new_raw_cap: usize) -> Result<(), FailedAllocationError> { assert!(self.table.size() <= new_raw_cap); assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0); let mut old_table = replace(&mut self.table, RawTable::new(new_raw_cap)?); let old_size = old_table.size(); if old_table.size() == 0 { return Ok(()); } let mut bucket = Bucket::head_bucket(&mut old_table); // This is how the buckets might be laid out in memory: // ($ marks an initialized bucket) // ________________ // |$$$_$$$$$$_$$$$$| // // But we've skipped the entire initial cluster of buckets // and will continue iteration in this order: // ________________ // |$$$$$$_$$$$$ // ^ wrap around once end is reached // ________________ // $$$_____________| // ^ exit once table.size == 0 loop { bucket = match bucket.peek() { Full(bucket) => { let h = bucket.hash(); let (b, k, v) = bucket.take(); self.insert_hashed_ordered(h, k, v); if b.table().size() == 0 { break; } b.into_bucket() }, Empty(b) => b.into_bucket(), }; bucket.next(); } assert_eq!(self.table.size(), old_size); Ok(()) } /// Shrinks the capacity of the map as much as possible. It will drop /// down as much as possible while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap = HashMap::with_capacity(100); /// map.insert(1, 2); /// map.insert(3, 4); /// assert!(map.capacity() >= 100); /// map.shrink_to_fit(); /// assert!(map.capacity() >= 2); /// ``` pub fn shrink_to_fit(&mut self) { self.try_shrink_to_fit().unwrap(); } pub fn try_shrink_to_fit(&mut self) -> Result<(), FailedAllocationError> { let new_raw_cap = self.resize_policy.raw_capacity(self.len()); if self.raw_capacity() != new_raw_cap { let old_table = replace(&mut self.table, RawTable::new(new_raw_cap)?); let old_size = old_table.size(); // Shrink the table. Naive algorithm for resizing: for (h, k, v) in old_table.into_iter() { self.insert_hashed_nocheck(h, k, v); } debug_assert_eq!(self.table.size(), old_size); } Ok(()) } /// Insert a pre-hashed key-value pair, without first checking /// that there's enough room in the buckets. Returns a reference to the /// newly insert value. /// /// If the key already exists, the hashtable will be returned untouched /// and a reference to the existing element will be returned. 
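    /// (With the current signature, the previous value comes back as
    /// `Some(old_value)` when the key was already present, and `None`
    /// otherwise.)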
fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> Option { let entry = search_hashed(&mut self.table, hash, |key| *key == k).into_entry(k); match entry { Some(Occupied(mut elem)) => Some(elem.insert(v)), Some(Vacant(elem)) => { elem.insert(v); None }, None => unreachable!(), } } /// An iterator visiting all keys in arbitrary order. /// The iterator element type is `&'a K`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// for key in map.keys() { /// println!("{}", key); /// } /// ``` pub fn keys(&self) -> Keys { Keys { inner: self.iter() } } /// An iterator visiting all values in arbitrary order. /// The iterator element type is `&'a V`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values(&self) -> Values { Values { inner: self.iter() } } /// An iterator visiting all values mutably in arbitrary order. /// The iterator element type is `&'a mut V`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// for val in map.values_mut() { /// *val = *val + 10; /// } /// /// for val in map.values() { /// println!("{}", val); /// } /// ``` pub fn values_mut(&mut self) -> ValuesMut { ValuesMut { inner: self.iter_mut(), } } /// An iterator visiting all key-value pairs in arbitrary order. /// The iterator element type is `(&'a K, &'a V)`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// for (key, val) in map.iter() { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter(&self) -> Iter { Iter { inner: self.table.iter(), } } /// An iterator visiting all key-value pairs in arbitrary order, /// with mutable references to the values. /// The iterator element type is `(&'a K, &'a mut V)`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// // Update all values /// for (_, val) in map.iter_mut() { /// *val *= 2; /// } /// /// for (key, val) in &map { /// println!("key: {} val: {}", key, val); /// } /// ``` pub fn iter_mut(&mut self) -> IterMut { IterMut { inner: self.table.iter_mut(), } } /// Gets the given key's corresponding entry in the map for in-place manipulation. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut letters = HashMap::new(); /// /// for ch in "a short treatise on fungi".chars() { /// let counter = letters.entry(ch).or_insert(0); /// *counter += 1; /// } /// /// assert_eq!(letters[&'s'], 2); /// assert_eq!(letters[&'t'], 3); /// assert_eq!(letters[&'u'], 1); /// assert_eq!(letters.get(&'y'), None); /// ``` pub fn entry(&mut self, key: K) -> Entry { self.try_entry(key).unwrap() } #[inline(always)] pub fn try_entry(&mut self, key: K) -> Result, FailedAllocationError> { // Gotta resize now. self.try_reserve(1)?; let hash = self.make_hash(&key); Ok(search_hashed(&mut self.table, hash, |q| q.eq(&key)) .into_entry(key) .expect("unreachable")) } /// Returns the number of elements in the map. 
/// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut a = HashMap::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a"); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { self.table.size() } /// Returns true if the map contains no elements. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut a = HashMap::new(); /// assert!(a.is_empty()); /// a.insert(1, "a"); /// assert!(!a.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Clears the map, returning all key-value pairs as an iterator. Keeps the /// allocated memory for reuse. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut a = HashMap::new(); /// a.insert(1, "a"); /// a.insert(2, "b"); /// /// for (k, v) in a.drain().take(1) { /// assert!(k == 1 || k == 2); /// assert!(v == "a" || v == "b"); /// } /// /// assert!(a.is_empty()); /// ``` #[inline] pub fn drain(&mut self) -> Drain where K: 'static, V: 'static, { Drain { inner: self.table.drain(), } } /// Clears the map, removing all key-value pairs. Keeps the allocated memory /// for reuse. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut a = HashMap::new(); /// a.insert(1, "a"); /// a.clear(); /// assert!(a.is_empty()); /// ``` #[inline] pub fn clear(&mut self) where K: 'static, V: 'static, { self.drain(); } /// Returns a reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for /// the key type. /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); /// ``` pub fn get(&self, k: &Q) -> Option<&V> where K: Borrow, Q: Hash + Eq, { self.search(k) .into_occupied_bucket() .map(|bucket| bucket.into_refs().1) } /// Returns true if the map contains a value for the specified key. /// /// The key may be any borrowed form of the map's key type, but /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for /// the key type. /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` pub fn contains_key(&self, k: &Q) -> bool where K: Borrow, Q: Hash + Eq, { self.search(k).into_occupied_bucket().is_some() } /// Returns a mutable reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for /// the key type. /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert(1, "a"); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; /// } /// assert_eq!(map[&1], "b"); /// ``` pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> where K: Borrow, Q: Hash + Eq, { self.search_mut(k) .into_occupied_bucket() .map(|bucket| bucket.into_mut_refs().1) } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, [`None`] is returned. 
/// /// If the map did have this key present, the value is updated, and the old /// value is returned. The key is not updated, though; this matters for /// types that can be `==` without being identical. See the [module-level /// documentation] for more. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [module-level documentation]: index.html#insert-and-complex-keys /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// assert_eq!(map.insert(37, "a"), None); /// assert_eq!(map.is_empty(), false); /// /// map.insert(37, "b"); /// assert_eq!(map.insert(37, "c"), Some("b")); /// assert_eq!(map[&37], "c"); /// ``` pub fn insert(&mut self, k: K, v: V) -> Option { self.try_insert(k, v).unwrap() } #[inline] pub fn try_insert(&mut self, k: K, v: V) -> Result, FailedAllocationError> { let hash = self.make_hash(&k); self.try_reserve(1)?; Ok(self.insert_hashed_nocheck(hash, k, v)) } /// Removes a key from the map, returning the value at the key if the key /// was previously in the map. /// /// The key may be any borrowed form of the map's key type, but /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for /// the key type. /// /// [`Eq`]: ../../std/cmp/trait.Eq.html /// [`Hash`]: ../../std/hash/trait.Hash.html /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// ``` pub fn remove(&mut self, k: &Q) -> Option where K: Borrow, Q: Hash + Eq, { if self.table.size() == 0 { return None; } self.search_mut(k) .into_occupied_bucket() .map(|bucket| pop_internal(bucket).1) } /// Retains only the elements specified by the predicate. /// /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns `false`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); /// map.retain(|&k, _| k % 2 == 0); /// assert_eq!(map.len(), 4); /// ``` pub fn retain(&mut self, mut f: F) where F: FnMut(&K, &mut V) -> bool, { if self.table.size() == 0 { return; } let mut elems_left = self.table.size(); let mut bucket = Bucket::head_bucket(&mut self.table); bucket.prev(); let start_index = bucket.index(); while elems_left != 0 { bucket = match bucket.peek() { Full(mut full) => { elems_left -= 1; let should_remove = { let (k, v) = full.read_mut(); !f(k, v) }; if should_remove { let prev_raw = full.raw(); let (_, _, t) = pop_internal(full); Bucket::new_from(prev_raw, t) } else { full.into_bucket() } }, Empty(b) => b.into_bucket(), }; bucket.prev(); // reverse iteration debug_assert!(elems_left == 0 || bucket.index() != start_index); } } } impl PartialEq for HashMap where K: Eq + Hash, V: PartialEq, S: BuildHasher, { fn eq(&self, other: &HashMap) -> bool { if self.len() != other.len() { return false; } self.iter() .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for HashMap where K: Eq + Hash, V: Eq, S: BuildHasher, { } impl Debug for HashMap where K: Eq + Hash + Debug, V: Debug, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl Default for HashMap where K: Eq + Hash, S: BuildHasher + Default, { /// Creates an empty `HashMap`, with the `Default` value for the hasher. 
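    ///
    /// # Examples
    ///
    /// A minimal sketch of typical use:
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let map: HashMap<i32, String> = HashMap::default();
    /// assert!(map.is_empty());
    /// ```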
fn default() -> HashMap { HashMap::with_hasher(Default::default()) } } impl<'a, K, Q: ?Sized, V, S> Index<&'a Q> for HashMap where K: Eq + Hash + Borrow, Q: Eq + Hash, S: BuildHasher, { type Output = V; #[inline] fn index(&self, index: &Q) -> &V { self.get(index).expect("no entry found for key") } } /// An iterator over the entries of a `HashMap`. /// /// This `struct` is created by the [`iter`] method on [`HashMap`]. See its /// documentation for more. /// /// [`iter`]: struct.HashMap.html#method.iter /// [`HashMap`]: struct.HashMap.html pub struct Iter<'a, K: 'a, V: 'a> { inner: table::Iter<'a, K, V>, } // FIXME(#19839) Remove in favor of `#[derive(Clone)]` impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Iter<'a, K, V> { Iter { inner: self.inner.clone(), } } } impl<'a, K: Debug, V: Debug> fmt::Debug for Iter<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A mutable iterator over the entries of a `HashMap`. /// /// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its /// documentation for more. /// /// [`iter_mut`]: struct.HashMap.html#method.iter_mut /// [`HashMap`]: struct.HashMap.html pub struct IterMut<'a, K: 'a, V: 'a> { inner: table::IterMut<'a, K, V>, } /// An owning iterator over the entries of a `HashMap`. /// /// This `struct` is created by the [`into_iter`] method on [`HashMap`][`HashMap`] /// (provided by the `IntoIterator` trait). See its documentation for more. /// /// [`into_iter`]: struct.HashMap.html#method.into_iter /// [`HashMap`]: struct.HashMap.html pub struct IntoIter { pub(super) inner: table::IntoIter, } /// An iterator over the keys of a `HashMap`. /// /// This `struct` is created by the [`keys`] method on [`HashMap`]. See its /// documentation for more. /// /// [`keys`]: struct.HashMap.html#method.keys /// [`HashMap`]: struct.HashMap.html pub struct Keys<'a, K: 'a, V: 'a> { inner: Iter<'a, K, V>, } // FIXME(#19839) Remove in favor of `#[derive(Clone)]` impl<'a, K, V> Clone for Keys<'a, K, V> { fn clone(&self) -> Keys<'a, K, V> { Keys { inner: self.inner.clone(), } } } impl<'a, K: Debug, V> fmt::Debug for Keys<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// An iterator over the values of a `HashMap`. /// /// This `struct` is created by the [`values`] method on [`HashMap`]. See its /// documentation for more. /// /// [`values`]: struct.HashMap.html#method.values /// [`HashMap`]: struct.HashMap.html pub struct Values<'a, K: 'a, V: 'a> { inner: Iter<'a, K, V>, } // FIXME(#19839) Remove in favor of `#[derive(Clone)]` impl<'a, K, V> Clone for Values<'a, K, V> { fn clone(&self) -> Values<'a, K, V> { Values { inner: self.inner.clone(), } } } impl<'a, K, V: Debug> fmt::Debug for Values<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A draining iterator over the entries of a `HashMap`. /// /// This `struct` is created by the [`drain`] method on [`HashMap`]. See its /// documentation for more. /// /// [`drain`]: struct.HashMap.html#method.drain /// [`HashMap`]: struct.HashMap.html pub struct Drain<'a, K: 'static, V: 'static> { pub(super) inner: table::Drain<'a, K, V>, } /// A mutable iterator over the values of a `HashMap`. /// /// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its /// documentation for more. 
/// /// [`values_mut`]: struct.HashMap.html#method.values_mut /// [`HashMap`]: struct.HashMap.html pub struct ValuesMut<'a, K: 'a, V: 'a> { inner: IterMut<'a, K, V>, } enum InternalEntry { Occupied { elem: FullBucket, }, Vacant { hash: SafeHash, elem: VacantEntryState, }, TableIsEmpty, } impl InternalEntry { #[inline] fn into_occupied_bucket(self) -> Option> { match self { InternalEntry::Occupied { elem } => Some(elem), _ => None, } } } impl<'a, K, V> InternalEntry> { #[inline] fn into_entry(self, key: K) -> Option> { match self { InternalEntry::Occupied { elem } => Some(Occupied(OccupiedEntry { key: Some(key), elem, })), InternalEntry::Vacant { hash, elem } => Some(Vacant(VacantEntry { hash, key, elem })), InternalEntry::TableIsEmpty => None, } } } /// A view into a single entry in a map, which may either be vacant or occupied. /// /// This `enum` is constructed from the [`entry`] method on [`HashMap`]. /// /// [`HashMap`]: struct.HashMap.html /// [`entry`]: struct.HashMap.html#method.entry pub enum Entry<'a, K: 'a, V: 'a> { /// An occupied entry. Occupied(OccupiedEntry<'a, K, V>), /// A vacant entry. Vacant(VacantEntry<'a, K, V>), } impl<'a, K: 'a + Debug, V: 'a + Debug> Debug for Entry<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), } } } /// A view into an occupied entry in a `HashMap`. /// It is part of the [`Entry`] enum. /// /// [`Entry`]: enum.Entry.html pub struct OccupiedEntry<'a, K: 'a, V: 'a> { key: Option, elem: FullBucket>, } impl<'a, K: 'a + Debug, V: 'a + Debug> Debug for OccupiedEntry<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) .field("value", self.get()) .finish() } } /// A view into a vacant entry in a `HashMap`. /// It is part of the [`Entry`] enum. /// /// [`Entry`]: enum.Entry.html pub struct VacantEntry<'a, K: 'a, V: 'a> { hash: SafeHash, key: K, elem: VacantEntryState>, } impl<'a, K: 'a + Debug, V: 'a> Debug for VacantEntry<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } } /// Possible states of a VacantEntry. enum VacantEntryState { /// The index is occupied, but the key to insert has precedence, /// and will kick the current one out on insertion. NeqElem(FullBucket, usize), /// The index is genuinely vacant. NoElem(EmptyBucket, usize), } impl<'a, K, V, S> IntoIterator for &'a HashMap where K: Eq + Hash, S: BuildHasher, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Iter<'a, K, V> { self.iter() } } impl<'a, K, V, S> IntoIterator for &'a mut HashMap where K: Eq + Hash, S: BuildHasher, { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> IterMut<'a, K, V> { self.iter_mut() } } impl IntoIterator for HashMap where K: Eq + Hash, S: BuildHasher, { type Item = (K, V); type IntoIter = IntoIter; /// Creates a consuming iterator, that is, one that moves each key-value /// pair out of the map in arbitrary order. The map cannot be used after /// calling this. 
/// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map = HashMap::new(); /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// // Not possible with .iter() /// let vec: Vec<(&str, isize)> = map.into_iter().collect(); /// ``` fn into_iter(self) -> IntoIter { IntoIter { inner: self.table.into_iter(), } } } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); #[inline] fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); #[inline] fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next() } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, K, V> fmt::Debug for IterMut<'a, K, V> where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.inner.iter()).finish() } } impl Iterator for IntoIter { type Item = (K, V); #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next().map(|(_, k, v)| (k, v)) } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl ExactSizeIterator for IntoIter { #[inline] fn len(&self) -> usize { self.inner.len() } } impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.inner.iter()).finish() } } impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; #[inline] fn next(&mut self) -> Option<&'a K> { self.inner.next().map(|(k, _)| k) } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; #[inline] fn next(&mut self) -> Option<&'a V> { self.inner.next().map(|(_, v)| v) } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { type Item = &'a mut V; #[inline] fn next(&mut self) -> Option<&'a mut V> { self.inner.next().map(|(_, v)| v) } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, K, V> fmt::Debug for ValuesMut<'a, K, V> where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.inner.inner.iter()).finish() } } impl<'a, K, V> Iterator for Drain<'a, K, V> { type Item = (K, V); #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next().map(|(_, k, v)| (k, v)) } #[inline] fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> { #[inline] fn len(&self) -> usize { self.inner.len() } } impl<'a, K, V> fmt::Debug for Drain<'a, K, V> where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.inner.iter()).finish() } } // FORK NOTE: Removed Placer impl impl<'a, K, V> Entry<'a, K, V> { /// 
Ensures a value is in the entry by inserting the default if empty, and returns /// a mutable reference to the value in the entry. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// assert_eq!(map["poneyland"], 12); /// /// *map.entry("poneyland").or_insert(12) += 10; /// assert_eq!(map["poneyland"], 22); /// ``` pub fn or_insert(self, default: V) -> &'a mut V { match self { Occupied(entry) => entry.into_mut(), Vacant(entry) => entry.insert(default), } } /// Ensures a value is in the entry by inserting the result of the default function if empty, /// and returns a mutable reference to the value in the entry. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap<&str, String> = HashMap::new(); /// let s = "hoho".to_string(); /// /// map.entry("poneyland").or_insert_with(|| s); /// /// assert_eq!(map["poneyland"], "hoho".to_string()); /// ``` pub fn or_insert_with V>(self, default: F) -> &'a mut V { match self { Occupied(entry) => entry.into_mut(), Vacant(entry) => entry.insert(default()), } } /// Returns a reference to this entry's key. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); /// ``` pub fn key(&self) -> &K { match *self { Occupied(ref entry) => entry.key(), Vacant(ref entry) => entry.key(), } } } impl<'a, K, V> OccupiedEntry<'a, K, V> { /// Gets a reference to the key in the entry. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); /// ``` pub fn key(&self) -> &K { self.elem.read().0 } /// Take the ownership of the key and value from the map. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// // We delete the entry from the map. /// o.remove_entry(); /// } /// /// assert_eq!(map.contains_key("poneyland"), false); /// ``` pub fn remove_entry(self) -> (K, V) { let (k, v, _) = pop_internal(self.elem); (k, v) } /// Gets a reference to the value in the entry. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// assert_eq!(o.get(), &12); /// } /// ``` pub fn get(&self) -> &V { self.elem.read().1 } /// Gets a mutable reference to the value in the entry. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// assert_eq!(map["poneyland"], 12); /// if let Entry::Occupied(mut o) = map.entry("poneyland") { /// *o.get_mut() += 10; /// } /// /// assert_eq!(map["poneyland"], 22); /// ``` pub fn get_mut(&mut self) -> &mut V { self.elem.read_mut().1 } /// Converts the OccupiedEntry into a mutable reference to the value in the entry /// with a lifetime bound to the map itself. 
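    ///
    /// If you need a borrow that leaves the entry usable afterwards, see
    /// `get_mut` instead; `into_mut` consumes the `OccupiedEntry`.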
/// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// assert_eq!(map["poneyland"], 12); /// if let Entry::Occupied(o) = map.entry("poneyland") { /// *o.into_mut() += 10; /// } /// /// assert_eq!(map["poneyland"], 22); /// ``` pub fn into_mut(self) -> &'a mut V { self.elem.into_mut_refs().1 } /// Sets the value of the entry, and returns the entry's old value. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// if let Entry::Occupied(mut o) = map.entry("poneyland") { /// assert_eq!(o.insert(15), 12); /// } /// /// assert_eq!(map["poneyland"], 15); /// ``` pub fn insert(&mut self, mut value: V) -> V { let old_value = self.get_mut(); mem::swap(&mut value, old_value); value } /// Takes the value out of the entry, and returns it. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// map.entry("poneyland").or_insert(12); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// assert_eq!(o.remove(), 12); /// } /// /// assert_eq!(map.contains_key("poneyland"), false); /// ``` pub fn remove(self) -> V { pop_internal(self.elem).1 } /// Returns a key that was used for search. /// /// The key was retained for further use. fn take_key(&mut self) -> Option { self.key.take() } } impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntry`. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); /// ``` pub fn key(&self) -> &K { &self.key } /// Take ownership of the key. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// /// if let Entry::Vacant(v) = map.entry("poneyland") { /// v.into_key(); /// } /// ``` pub fn into_key(self) -> K { self.key } /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. /// /// # Examples /// /// ``` /// use std::collections::HashMap; /// use std::collections::hash_map::Entry; /// /// let mut map: HashMap<&str, u32> = HashMap::new(); /// /// if let Entry::Vacant(o) = map.entry("poneyland") { /// o.insert(37); /// } /// assert_eq!(map["poneyland"], 37); /// ``` pub fn insert(self, value: V) -> &'a mut V { let b = match self.elem { NeqElem(mut bucket, disp) => { if disp >= DISPLACEMENT_THRESHOLD { bucket.table_mut().set_tag(true); } robin_hood(bucket, disp, self.hash, self.key, value) }, NoElem(mut bucket, disp) => { if disp >= DISPLACEMENT_THRESHOLD { bucket.table_mut().set_tag(true); } bucket.put(self.hash, self.key, value) }, }; b.into_mut_refs().1 } } impl FromIterator<(K, V)> for HashMap where K: Eq + Hash, S: BuildHasher + Default, { fn from_iter>(iter: T) -> HashMap { let mut map = HashMap::with_hasher(Default::default()); map.extend(iter); map } } impl Extend<(K, V)> for HashMap where K: Eq + Hash, S: BuildHasher, { fn extend>(&mut self, iter: T) { // Keys may be already present or show multiple times in the iterator. 
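        // In the worst case every key in the iterator is already present (or
        // repeated), so a reservation based on the full hint could be much
        // larger than what actually ends up being stored.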
// Reserve the entire hint lower bound if the map is empty. // Otherwise reserve half the hint (rounded up), so the map // will only resize twice in the worst case. let iter = iter.into_iter(); let reserve = if self.is_empty() { iter.size_hint().0 } else { (iter.size_hint().0 + 1) / 2 }; self.reserve(reserve); for (k, v) in iter { self.insert(k, v); } } } impl<'a, K, V, S> Extend<(&'a K, &'a V)> for HashMap where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, { fn extend>(&mut self, iter: T) { self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); } } // FORK NOTE: These can be reused pub use std::collections::hash_map::{DefaultHasher, RandomState}; impl super::Recover for HashMap where K: Eq + Hash + Borrow, S: BuildHasher, Q: Eq + Hash, { type Key = K; fn get(&self, key: &Q) -> Option<&K> { self.search(key) .into_occupied_bucket() .map(|bucket| bucket.into_refs().0) } fn take(&mut self, key: &Q) -> Option { if self.table.size() == 0 { return None; } self.search_mut(key) .into_occupied_bucket() .map(|bucket| pop_internal(bucket).0) } fn replace(&mut self, key: K) -> Option { self.reserve(1); match self.entry(key) { Occupied(mut occupied) => { let key = occupied.take_key().unwrap(); Some(mem::replace(occupied.elem.read_mut().0, key)) }, Vacant(vacant) => { vacant.insert(()); None }, } } } #[allow(dead_code)] fn assert_covariance() { fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { v } fn map_val<'new>(v: HashMap) -> HashMap { v } fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> { v } fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { v } fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> { v } fn into_iter_val<'new>(v: IntoIter) -> IntoIter { v } fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> { v } fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> { v } fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> { v } fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { v } fn drain<'new>( d: Drain<'static, &'static str, &'static str>, ) -> Drain<'new, &'new str, &'new str> { d } } #[cfg(test)] mod test_map { extern crate rand; use self::rand::{thread_rng, Rng}; use super::Entry::{Occupied, Vacant}; use super::HashMap; use super::RandomState; use cell::RefCell; #[test] fn test_zero_capacities() { type HM = HashMap; let m = HM::new(); assert_eq!(m.capacity(), 0); let m = HM::default(); assert_eq!(m.capacity(), 0); let m = HM::with_hasher(RandomState::new()); assert_eq!(m.capacity(), 0); let m = HM::with_capacity(0); assert_eq!(m.capacity(), 0); let m = HM::with_capacity_and_hasher(0, RandomState::new()); assert_eq!(m.capacity(), 0); let mut m = HM::new(); m.insert(1, 1); m.insert(2, 2); m.remove(&1); m.remove(&2); m.shrink_to_fit(); assert_eq!(m.capacity(), 0); let mut m = HM::new(); m.reserve(0); assert_eq!(m.capacity(), 0); } #[test] fn test_create_capacity_zero() { let mut m = HashMap::with_capacity(0); assert!(m.insert(1, 1).is_none()); assert!(m.contains_key(&1)); assert!(!m.contains_key(&0)); } #[test] fn test_insert() { let mut m = HashMap::new(); assert_eq!(m.len(), 0); assert!(m.insert(1, 2).is_none()); assert_eq!(m.len(), 1); assert!(m.insert(2, 4).is_none()); assert_eq!(m.len(), 2); assert_eq!(*m.get(&1).unwrap(), 2); assert_eq!(*m.get(&2).unwrap(), 4); } #[test] fn test_clone() { let mut m = HashMap::new(); assert_eq!(m.len(), 0); 
impl<K, S, Q: ?Sized> super::Recover<Q> for HashMap<K, (), S>
where
    K: Eq + Hash + Borrow<Q>,
    S: BuildHasher,
    Q: Eq + Hash,
{
    type Key = K;

    fn get(&self, key: &Q) -> Option<&K> {
        self.search(key)
            .into_occupied_bucket()
            .map(|bucket| bucket.into_refs().0)
    }

    fn take(&mut self, key: &Q) -> Option<K> {
        if self.table.size() == 0 {
            return None;
        }

        self.search_mut(key)
            .into_occupied_bucket()
            .map(|bucket| pop_internal(bucket).0)
    }

    fn replace(&mut self, key: K) -> Option<K> {
        self.reserve(1);

        match self.entry(key) {
            Occupied(mut occupied) => {
                let key = occupied.take_key().unwrap();
                Some(mem::replace(occupied.elem.read_mut().0, key))
            },
            Vacant(vacant) => {
                vacant.insert(());
                None
            },
        }
    }
}

#[allow(dead_code)]
fn assert_covariance() {
    fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> {
        v
    }
    fn map_val<'new>(v: HashMap<u8, &'static str>) -> HashMap<u8, &'new str> {
        v
    }
    fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> {
        v
    }
    fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> {
        v
    }
    fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> {
        v
    }
    fn into_iter_val<'new>(v: IntoIter<u8, &'static str>) -> IntoIter<u8, &'new str> {
        v
    }
    fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> {
        v
    }
    fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> {
        v
    }
    fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> {
        v
    }
    fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> {
        v
    }
    fn drain<'new>(
        d: Drain<'static, &'static str, &'static str>,
    ) -> Drain<'new, &'new str, &'new str> {
        d
    }
}

#[cfg(test)]
mod test_map {
    extern crate rand;

    use self::rand::{thread_rng, Rng};
    use super::Entry::{Occupied, Vacant};
    use super::HashMap;
    use super::RandomState;
    use std::cell::RefCell;

    #[test]
    fn test_zero_capacities() {
        type HM = HashMap<i32, i32>;

        let m = HM::new();
        assert_eq!(m.capacity(), 0);

        let m = HM::default();
        assert_eq!(m.capacity(), 0);

        let m = HM::with_hasher(RandomState::new());
        assert_eq!(m.capacity(), 0);

        let m = HM::with_capacity(0);
        assert_eq!(m.capacity(), 0);

        let m = HM::with_capacity_and_hasher(0, RandomState::new());
        assert_eq!(m.capacity(), 0);

        let mut m = HM::new();
        m.insert(1, 1);
        m.insert(2, 2);
        m.remove(&1);
        m.remove(&2);
        m.shrink_to_fit();
        assert_eq!(m.capacity(), 0);

        let mut m = HM::new();
        m.reserve(0);
        assert_eq!(m.capacity(), 0);
    }

    #[test]
    fn test_create_capacity_zero() {
        let mut m = HashMap::with_capacity(0);

        assert!(m.insert(1, 1).is_none());

        assert!(m.contains_key(&1));
        assert!(!m.contains_key(&0));
    }

    #[test]
    fn test_insert() {
        let mut m = HashMap::new();
        assert_eq!(m.len(), 0);
        assert!(m.insert(1, 2).is_none());
        assert_eq!(m.len(), 1);
        assert!(m.insert(2, 4).is_none());
        assert_eq!(m.len(), 2);
        assert_eq!(*m.get(&1).unwrap(), 2);
        assert_eq!(*m.get(&2).unwrap(), 4);
    }

    #[test]
    fn test_clone() {
        let mut m = HashMap::new();
        assert_eq!(m.len(), 0);
        assert!(m.insert(1, 2).is_none());
        assert_eq!(m.len(), 1);
        assert!(m.insert(2, 4).is_none());
        assert_eq!(m.len(), 2);
        let m2 = m.clone();
        assert_eq!(*m2.get(&1).unwrap(), 2);
        assert_eq!(*m2.get(&2).unwrap(), 4);
        assert_eq!(m2.len(), 2);
    }

    thread_local! {
        static DROP_VECTOR: RefCell<Vec<i32>> = RefCell::new(Vec::new())
    }

    #[derive(Hash, PartialEq, Eq)]
    struct Dropable {
        k: usize,
    }

    impl Dropable {
        fn new(k: usize) -> Dropable {
            DROP_VECTOR.with(|slot| {
                slot.borrow_mut()[k] += 1;
            });

            Dropable { k: k }
        }
    }

    impl Drop for Dropable {
        fn drop(&mut self) {
            DROP_VECTOR.with(|slot| {
                slot.borrow_mut()[self.k] -= 1;
            });
        }
    }

    impl Clone for Dropable {
        fn clone(&self) -> Dropable {
            Dropable::new(self.k)
        }
    }

    #[test]
    fn test_drops() {
        DROP_VECTOR.with(|slot| {
            *slot.borrow_mut() = vec![0; 200];
        });

        {
            let mut m = HashMap::new();

            DROP_VECTOR.with(|v| {
                for i in 0..200 {
                    assert_eq!(v.borrow()[i], 0);
                }
            });

            for i in 0..100 {
                let d1 = Dropable::new(i);
                let d2 = Dropable::new(i + 100);
                m.insert(d1, d2);
            }

            DROP_VECTOR.with(|v| {
                for i in 0..200 {
                    assert_eq!(v.borrow()[i], 1);
                }
            });

            for i in 0..50 {
                let k = Dropable::new(i);
                let v = m.remove(&k);

                assert!(v.is_some());

                DROP_VECTOR.with(|v| {
                    assert_eq!(v.borrow()[i], 1);
                    assert_eq!(v.borrow()[i + 100], 1);
                });
            }

            DROP_VECTOR.with(|v| {
                for i in 0..50 {
                    assert_eq!(v.borrow()[i], 0);
                    assert_eq!(v.borrow()[i + 100], 0);
                }

                for i in 50..100 {
                    assert_eq!(v.borrow()[i], 1);
                    assert_eq!(v.borrow()[i + 100], 1);
                }
            });
        }

        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 0);
            }
        });
    }

    #[test]
    fn test_into_iter_drops() {
        DROP_VECTOR.with(|v| {
            *v.borrow_mut() = vec![0; 200];
        });

        let hm = {
            let mut hm = HashMap::new();

            DROP_VECTOR.with(|v| {
                for i in 0..200 {
                    assert_eq!(v.borrow()[i], 0);
                }
            });

            for i in 0..100 {
                let d1 = Dropable::new(i);
                let d2 = Dropable::new(i + 100);
                hm.insert(d1, d2);
            }

            DROP_VECTOR.with(|v| {
                for i in 0..200 {
                    assert_eq!(v.borrow()[i], 1);
                }
            });

            hm
        };

        // By the way, ensure that cloning doesn't screw up the dropping.
        drop(hm.clone());

        {
            let mut half = hm.into_iter().take(50);

            DROP_VECTOR.with(|v| {
                for i in 0..200 {
                    assert_eq!(v.borrow()[i], 1);
                }
            });

            for _ in half.by_ref() {}

            DROP_VECTOR.with(|v| {
                let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count();
                let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count();

                assert_eq!(nk, 50);
                assert_eq!(nv, 50);
            });
        };

        DROP_VECTOR.with(|v| {
            for i in 0..200 {
                assert_eq!(v.borrow()[i], 0);
            }
        });
    }

    #[test]
    fn test_empty_remove() {
        let mut m: HashMap<i32, bool> = HashMap::new();
        assert_eq!(m.remove(&0), None);
    }

    #[test]
    fn test_empty_entry() {
        let mut m: HashMap<i32, bool> = HashMap::new();
        match m.entry(0) {
            Occupied(_) => panic!(),
            Vacant(_) => {},
        }
        assert!(*m.entry(0).or_insert(true));
        assert_eq!(m.len(), 1);
    }

    #[test]
    fn test_empty_iter() {
        let mut m: HashMap<i32, bool> = HashMap::new();
        assert_eq!(m.drain().next(), None);
        assert_eq!(m.keys().next(), None);
        assert_eq!(m.values().next(), None);
        assert_eq!(m.values_mut().next(), None);
        assert_eq!(m.iter().next(), None);
        assert_eq!(m.iter_mut().next(), None);
        assert_eq!(m.len(), 0);
        assert!(m.is_empty());
        assert_eq!(m.into_iter().next(), None);
    }

    #[test]
    fn test_lots_of_insertions() {
        let mut m = HashMap::new();

        // Try this a few times to make sure we never screw up the hashmap's
        // internal state.
        for _ in 0..10 {
            assert!(m.is_empty());

            for i in 1..1001 {
                assert!(m.insert(i, i).is_none());

                for j in 1..i + 1 {
                    let r = m.get(&j);
                    assert_eq!(r, Some(&j));
                }

                for j in i + 1..1001 {
                    let r = m.get(&j);
                    assert_eq!(r, None);
                }
            }

            for i in 1001..2001 {
                assert!(!m.contains_key(&i));
            }

            // remove forwards
            for i in 1..1001 {
                assert!(m.remove(&i).is_some());

                for j in 1..i + 1 {
                    assert!(!m.contains_key(&j));
                }

                for j in i + 1..1001 {
                    assert!(m.contains_key(&j));
                }
            }

            for i in 1..1001 {
                assert!(!m.contains_key(&i));
            }

            for i in 1..1001 {
                assert!(m.insert(i, i).is_none());
            }

            // remove backwards
            for i in (1..1001).rev() {
                assert!(m.remove(&i).is_some());

                for j in i..1001 {
                    assert!(!m.contains_key(&j));
                }

                for j in 1..i {
                    assert!(m.contains_key(&j));
                }
            }
        }
    }

    #[test]
    fn test_find_mut() {
        let mut m = HashMap::new();
        assert!(m.insert(1, 12).is_none());
        assert!(m.insert(2, 8).is_none());
        assert!(m.insert(5, 14).is_none());
        let new = 100;
        match m.get_mut(&5) {
            None => panic!(),
            Some(x) => *x = new,
        }
        assert_eq!(m.get(&5), Some(&new));
    }

    #[test]
    fn test_insert_overwrite() {
        let mut m = HashMap::new();
        assert!(m.insert(1, 2).is_none());
        assert_eq!(*m.get(&1).unwrap(), 2);
        assert!(!m.insert(1, 3).is_none());
        assert_eq!(*m.get(&1).unwrap(), 3);
    }

    #[test]
    fn test_insert_conflicts() {
        let mut m = HashMap::with_capacity(4);
        assert!(m.insert(1, 2).is_none());
        assert!(m.insert(5, 3).is_none());
        assert!(m.insert(9, 4).is_none());
        assert_eq!(*m.get(&9).unwrap(), 4);
        assert_eq!(*m.get(&5).unwrap(), 3);
        assert_eq!(*m.get(&1).unwrap(), 2);
    }

    #[test]
    fn test_conflict_remove() {
        let mut m = HashMap::with_capacity(4);
        assert!(m.insert(1, 2).is_none());
        assert_eq!(*m.get(&1).unwrap(), 2);
        assert!(m.insert(5, 3).is_none());
        assert_eq!(*m.get(&1).unwrap(), 2);
        assert_eq!(*m.get(&5).unwrap(), 3);
        assert!(m.insert(9, 4).is_none());
        assert_eq!(*m.get(&1).unwrap(), 2);
        assert_eq!(*m.get(&5).unwrap(), 3);
        assert_eq!(*m.get(&9).unwrap(), 4);
        assert!(m.remove(&1).is_some());
        assert_eq!(*m.get(&9).unwrap(), 4);
        assert_eq!(*m.get(&5).unwrap(), 3);
    }

    #[test]
    fn test_is_empty() {
        let mut m = HashMap::with_capacity(4);
        assert!(m.insert(1, 2).is_none());
        assert!(!m.is_empty());
        assert!(m.remove(&1).is_some());
        assert!(m.is_empty());
    }

    #[test]
    fn test_pop() {
        let mut m = HashMap::new();
        m.insert(1, 2);
        assert_eq!(m.remove(&1), Some(2));
        assert_eq!(m.remove(&1), None);
    }

    #[test]
    fn test_iterate() {
        let mut m = HashMap::with_capacity(4);
        for i in 0..32 {
            assert!(m.insert(i, i * 2).is_none());
        }
        assert_eq!(m.len(), 32);

        let mut observed: u32 = 0;

        for (k, v) in &m {
            assert_eq!(*v, *k * 2);
            observed |= 1 << *k;
        }
        assert_eq!(observed, 0xFFFF_FFFF);
    }

    #[test]
    fn test_keys() {
        let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
        let map: HashMap<_, _> = vec.into_iter().collect();
        let keys: Vec<_> = map.keys().cloned().collect();
        assert_eq!(keys.len(), 3);
        assert!(keys.contains(&1));
        assert!(keys.contains(&2));
        assert!(keys.contains(&3));
    }

    #[test]
    fn test_values() {
        let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
        let map: HashMap<_, _> = vec.into_iter().collect();
        let values: Vec<_> = map.values().cloned().collect();
        assert_eq!(values.len(), 3);
        assert!(values.contains(&'a'));
        assert!(values.contains(&'b'));
        assert!(values.contains(&'c'));
    }

    #[test]
    fn test_values_mut() {
        let vec = vec![(1, 1), (2, 2), (3, 3)];
        let mut map: HashMap<_, _> = vec.into_iter().collect();
        for value in map.values_mut() {
            *value = (*value) * 2
        }
        let values: Vec<_> = map.values().cloned().collect();
        assert_eq!(values.len(), 3);
        assert!(values.contains(&2));
        assert!(values.contains(&4));
        assert!(values.contains(&6));
    }

    #[test]
    fn test_find() {
        let mut m = HashMap::new();
        assert!(m.get(&1).is_none());
        m.insert(1, 2);
        match m.get(&1) {
            None => panic!(),
            Some(v) => assert_eq!(*v, 2),
        }
    }

    #[test]
    fn test_eq() {
        let mut m1 = HashMap::new();
        m1.insert(1, 2);
        m1.insert(2, 3);
        m1.insert(3, 4);

        let mut m2 = HashMap::new();
        m2.insert(1, 2);
        m2.insert(2, 3);

        assert_ne!(m1, m2);

        m2.insert(3, 4);

        assert_eq!(m1, m2);
    }

    #[test]
    fn test_show() {
        let mut map = HashMap::new();
        let empty: HashMap<i32, i32> = HashMap::new();

        map.insert(1, 2);
        map.insert(3, 4);

        let map_str = format!("{:?}", map);

        assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}");
        assert_eq!(format!("{:?}", empty), "{}");
    }

    #[test]
    fn test_expand() {
        let mut m = HashMap::new();

        assert_eq!(m.len(), 0);
        assert!(m.is_empty());

        let mut i = 0;
        let old_raw_cap = m.raw_capacity();
        while old_raw_cap == m.raw_capacity() {
            m.insert(i, i);
            i += 1;
        }

        assert_eq!(m.len(), i);
        assert!(!m.is_empty());
    }

    #[test]
    fn test_behavior_resize_policy() {
        let mut m = HashMap::new();

        assert_eq!(m.len(), 0);
        assert_eq!(m.raw_capacity(), 0);
        assert!(m.is_empty());

        m.insert(0, 0);
        m.remove(&0);
        assert!(m.is_empty());
        let initial_raw_cap = m.raw_capacity();
        m.reserve(initial_raw_cap);
        let raw_cap = m.raw_capacity();

        assert_eq!(raw_cap, initial_raw_cap * 2);

        let mut i = 0;
        for _ in 0..raw_cap * 3 / 4 {
            m.insert(i, i);
            i += 1;
        }
        // three quarters full

        assert_eq!(m.len(), i);
        assert_eq!(m.raw_capacity(), raw_cap);

        for _ in 0..raw_cap / 4 {
            m.insert(i, i);
            i += 1;
        }
        // half full

        let new_raw_cap = m.raw_capacity();
        assert_eq!(new_raw_cap, raw_cap * 2);

        for _ in 0..raw_cap / 2 - 1 {
            i -= 1;
            m.remove(&i);
            assert_eq!(m.raw_capacity(), new_raw_cap);
        }
        // A little more than one quarter full.
        m.shrink_to_fit();
        assert_eq!(m.raw_capacity(), raw_cap);
        // again, a little more than half full
        for _ in 0..raw_cap / 2 - 1 {
            i -= 1;
            m.remove(&i);
        }
        m.shrink_to_fit();

        assert_eq!(m.len(), i);
        assert!(!m.is_empty());
        assert_eq!(m.raw_capacity(), initial_raw_cap);
    }

    #[test]
    fn test_reserve_shrink_to_fit() {
        let mut m = HashMap::new();
        m.insert(0, 0);
        m.remove(&0);
        assert!(m.capacity() >= m.len());
        for i in 0..128 {
            m.insert(i, i);
        }
        m.reserve(256);

        let usable_cap = m.capacity();
        for i in 128..(128 + 256) {
            m.insert(i, i);
            assert_eq!(m.capacity(), usable_cap);
        }

        for i in 100..(128 + 256) {
            assert_eq!(m.remove(&i), Some(i));
        }
        m.shrink_to_fit();

        assert_eq!(m.len(), 100);
        assert!(!m.is_empty());
        assert!(m.capacity() >= m.len());

        for i in 0..100 {
            assert_eq!(m.remove(&i), Some(i));
        }
        m.shrink_to_fit();
        m.insert(0, 0);

        assert_eq!(m.len(), 1);
        assert!(m.capacity() >= m.len());
        assert_eq!(m.remove(&0), Some(0));
    }

    #[test]
    fn test_from_iter() {
        let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];

        let map: HashMap<_, _> = xs.iter().cloned().collect();

        for &(k, v) in &xs {
            assert_eq!(map.get(&k), Some(&v));
        }
    }

    #[test]
    fn test_size_hint() {
        let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];

        let map: HashMap<_, _> = xs.iter().cloned().collect();

        let mut iter = map.iter();

        for _ in iter.by_ref().take(3) {}

        assert_eq!(iter.size_hint(), (3, Some(3)));
    }

    #[test]
    fn test_iter_len() {
        let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];

        let map: HashMap<_, _> = xs.iter().cloned().collect();

        let mut iter = map.iter();

        for _ in iter.by_ref().take(3) {}

        assert_eq!(iter.len(), 3);
    }

    #[test]
    fn test_mut_size_hint() {
        let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];

        let mut map: HashMap<_, _> = xs.iter().cloned().collect();

        let mut iter = map.iter_mut();
        for _ in iter.by_ref().take(3) {}

        assert_eq!(iter.size_hint(), (3, Some(3)));
    }

    #[test]
    fn test_iter_mut_len() {
        let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];

        let mut map: HashMap<_, _> = xs.iter().cloned().collect();

        let mut iter = map.iter_mut();

        for _ in iter.by_ref().take(3) {}

        assert_eq!(iter.len(), 3);
    }

    #[test]
    fn test_index() {
        let mut map = HashMap::new();

        map.insert(1, 2);
        map.insert(2, 1);
        map.insert(3, 4);

        assert_eq!(map[&2], 1);
    }

    #[test]
    #[should_panic]
    fn test_index_nonexistent() {
        let mut map = HashMap::new();

        map.insert(1, 2);
        map.insert(2, 1);
        map.insert(3, 4);

        map[&4];
    }

    #[test]
    fn test_entry() {
        let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];

        let mut map: HashMap<_, _> = xs.iter().cloned().collect();

        // Existing key (insert)
        match map.entry(1) {
            Vacant(_) => unreachable!(),
            Occupied(mut view) => {
                assert_eq!(view.get(), &10);
                assert_eq!(view.insert(100), 10);
            },
        }
        assert_eq!(map.get(&1).unwrap(), &100);
        assert_eq!(map.len(), 6);

        // Existing key (update)
        match map.entry(2) {
            Vacant(_) => unreachable!(),
            Occupied(mut view) => {
                let v = view.get_mut();
                let new_v = (*v) * 10;
                *v = new_v;
            },
        }
        assert_eq!(map.get(&2).unwrap(), &200);
        assert_eq!(map.len(), 6);

        // Existing key (take)
        match map.entry(3) {
            Vacant(_) => unreachable!(),
            Occupied(view) => {
                assert_eq!(view.remove(), 30);
            },
        }
        assert_eq!(map.get(&3), None);
        assert_eq!(map.len(), 5);

        // Nonexistent key (insert)
        match map.entry(10) {
            Occupied(_) => unreachable!(),
            Vacant(view) => {
                assert_eq!(*view.insert(1000), 1000);
            },
        }
        assert_eq!(map.get(&10).unwrap(), &1000);
        assert_eq!(map.len(), 6);
    }

    #[test]
    fn test_entry_take_doesnt_corrupt() {
        #![allow(deprecated)] //rand
        // Test for #19292
        fn check(m: &HashMap<i32, ()>) {
            for k in m.keys() {
                assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
            }
        }

        let mut m = HashMap::new();
        let mut rng = thread_rng();

        // Populate the map with some items.
        for _ in 0..50 {
            let x = rng.gen_range(-10, 10);
            m.insert(x, ());
        }

        for i in 0..1000 {
            let x = rng.gen_range(-10, 10);
            match m.entry(x) {
                Vacant(_) => {},
                Occupied(e) => {
                    println!("{}: remove {}", i, x);
                    e.remove();
                },
            }

            check(&m);
        }
    }

    #[test]
    fn test_extend_ref() {
        let mut a = HashMap::new();
        a.insert(1, "one");
        let mut b = HashMap::new();
        b.insert(2, "two");
        b.insert(3, "three");

        a.extend(&b);

        assert_eq!(a.len(), 3);
        assert_eq!(a[&1], "one");
        assert_eq!(a[&2], "two");
        assert_eq!(a[&3], "three");
    }
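
    // Illustrative addition (hypothetical test name): `a.extend(&b)` above
    // goes through the `Extend<(&K, &V)>` impl, which requires `K: Copy` and
    // `V: Copy` because the entries are copied out of the borrowed map. For
    // non-`Copy` values the same effect can be had by cloning explicitly, as
    // sketched below.
    #[test]
    fn test_extend_ref_clone_sketch() {
        let mut a = HashMap::new();
        a.insert(1, "one".to_string());

        let mut b = HashMap::new();
        b.insert(2, "two".to_string());

        // Clone each entry by hand, since `String` is not `Copy`.
        a.extend(b.iter().map(|(&k, v)| (k, v.clone())));

        assert_eq!(a.len(), 2);
        assert_eq!(a[&2], "two");
    }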
    #[test]
    fn test_capacity_not_less_than_len() {
        let mut a = HashMap::new();
        let mut item = 0;

        for _ in 0..116 {
            a.insert(item, 0);
            item += 1;
        }

        assert!(a.capacity() > a.len());

        let free = a.capacity() - a.len();
        for _ in 0..free {
            a.insert(item, 0);
            item += 1;
        }

        assert_eq!(a.len(), a.capacity());

        // Insert at capacity should cause allocation.
        a.insert(item, 0);
        assert!(a.capacity() > a.len());
    }

    #[test]
    fn test_occupied_entry_key() {
        let mut a = HashMap::new();
        let key = "hello there";
        let value = "value goes here";
        assert!(a.is_empty());
        a.insert(key.clone(), value.clone());
        assert_eq!(a.len(), 1);
        assert_eq!(a[key], value);

        match a.entry(key.clone()) {
            Vacant(_) => panic!(),
            Occupied(e) => assert_eq!(key, *e.key()),
        }
        assert_eq!(a.len(), 1);
        assert_eq!(a[key], value);
    }

    #[test]
    fn test_vacant_entry_key() {
        let mut a = HashMap::new();
        let key = "hello there";
        let value = "value goes here";

        assert!(a.is_empty());
        match a.entry(key.clone()) {
            Occupied(_) => panic!(),
            Vacant(e) => {
                assert_eq!(key, *e.key());
                e.insert(value.clone());
            },
        }
        assert_eq!(a.len(), 1);
        assert_eq!(a[key], value);
    }

    #[test]
    fn test_retain() {
        let mut map: HashMap<i32, i32> = (0..100).map(|x| (x, x * 10)).collect();

        map.retain(|&k, _| k % 2 == 0);
        assert_eq!(map.len(), 50);
        assert_eq!(map[&2], 20);
        assert_eq!(map[&4], 40);
        assert_eq!(map[&6], 60);
    }

    #[test]
    fn test_adaptive() {
        const TEST_LEN: usize = 5000;
        // By cloning we get maps with the same hasher seed.
        let mut first = HashMap::new();
        let mut second = first.clone();
        first.extend((0..TEST_LEN).map(|i| (i, i)));
        second.extend((TEST_LEN..TEST_LEN * 2).map(|i| (i, i)));

        for (&k, &v) in &second {
            let prev_cap = first.capacity();
            let expect_grow = first.len() == prev_cap;
            first.insert(k, v);
            if !expect_grow && first.capacity() != prev_cap {
                return;
            }
        }
        panic!("Adaptive early resize failed");
    }
}
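
// Illustrative, self-contained sketch of the property that `assert_covariance`
// above checks at compile time: the map is covariant in its key and value
// types, so a map keyed by `&'static str` can be used where shorter-lived
// string slices are expected. Written against std's `HashMap` so it stands
// alone; the function name is an assumption made for the sketch.
#[cfg(test)]
#[allow(dead_code)]
fn covariance_sketch<'new>(
    v: std::collections::HashMap<&'static str, u8>,
) -> std::collections::HashMap<&'new str, u8> {
    // Compiles only because the key type parameter is covariant:
    // `&'static str` is a subtype of `&'new str`.
    v
}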