Diffstat (limited to 'library/alloc/src/collections/btree')
 -rw-r--r--  library/alloc/src/collections/btree/map.rs        | 4 ++--
 -rw-r--r--  library/alloc/src/collections/btree/node/tests.rs | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 8a7719347..1d9c4460e 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -46,8 +46,8 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
 /// is done is *very* inefficient for modern computer architectures. In particular, every element
 /// is stored in its own individually heap-allocated node. This means that every single insertion
 /// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
-/// are both notably expensive things to do in practice, we are forced to at very least reconsider
-/// the BST strategy.
+/// are both notably expensive things to do in practice, we are forced to, at the very least,
+/// reconsider the BST strategy.
 ///
 /// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
 /// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
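
The doc comment touched above is the standard rationale for B-Trees: a naive BST pays one heap allocation per element and one likely cache miss per comparison, whereas a B-Tree node keeps B-1 to 2B-1 elements in a single contiguous block. The following minimal sketch illustrates that idea only; the names, the value of B, and the Vec-based storage are illustrative assumptions, not the layout used by node.rs.

// A hypothetical, simplified leaf node: up to CAPACITY key/value pairs live in
// one contiguous allocation, so all comparisons inside a node stay in cache
// instead of chasing one heap pointer per element as a naive BST would.
const B: usize = 6;
const CAPACITY: usize = 2 * B - 1;

struct LeafNode<K, V> {
    // Kept sorted so the node can be binary-searched; the real node uses
    // fixed-size arrays and tracks its own length separately.
    entries: Vec<(K, V)>,
}

impl<K: Ord, V> LeafNode<K, V> {
    fn new() -> Self {
        LeafNode { entries: Vec::with_capacity(CAPACITY) }
    }

    // Every comparison for this node scans one contiguous, cache-friendly slice.
    fn get(&self, key: &K) -> Option<&V> {
        self.entries
            .binary_search_by(|(k, _)| k.cmp(key))
            .ok()
            .map(|i| &self.entries[i].1)
    }

    // Returns false when the node is full; a real B-Tree would split the node
    // and push the middle key into the parent instead.
    fn insert(&mut self, key: K, val: V) -> bool {
        if self.entries.len() == CAPACITY {
            return false;
        }
        let pos = self.entries.binary_search_by(|(k, _)| k.cmp(&key));
        match pos {
            Ok(i) => self.entries[i].1 = val,
            Err(i) => self.entries.insert(i, (key, val)),
        }
        true
    }
}

fn main() {
    let mut node = LeafNode::new();
    assert!(node.insert("b", 2));
    assert!(node.insert("a", 1));
    assert_eq!(node.get(&"a"), Some(&1));
    assert_eq!(node.get(&"z"), None);
}

In the actual node.rs, a leaf stores keys and values in fixed-capacity MaybeUninit arrays and internal nodes additionally hold child edges, but the cache-locality argument sketched here is the same.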
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index aadb0dc9c..64bce0ff8 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -94,6 +94,7 @@ fn test_partial_eq() {
 #[test]
 #[cfg(target_arch = "x86_64")]
+#[cfg_attr(miri, ignore)] // We'd like to run Miri with layout randomization
 fn test_sizes() {
     assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
     assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
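
For context on the attribute added above: #[cfg_attr(miri, ignore)] applies #[ignore] only when the miri cfg is set, so test_sizes keeps running under plain cargo test but is skipped under Miri, where layout randomization (when enabled) can change the sizes the test asserts. Below is a standalone sketch of the pattern; the module, struct, and test names are made up for illustration.

#[allow(dead_code)]
#[cfg(test)]
mod layout_assumptions {
    // Four small fields: rustc's current default layout packs them into 8
    // bytes, but a randomized field order may introduce padding and grow the
    // struct, which is why the size check below is skipped under Miri.
    struct Fields {
        a: u8,
        b: u16,
        c: u8,
        d: u32,
    }

    #[test]
    #[cfg_attr(miri, ignore)] // skipped when the `miri` cfg is set
    fn default_layout_size() {
        // repr(Rust) sizes are not a language guarantee; this only documents
        // what the current non-randomized layout produces.
        assert_eq!(core::mem::size_of::<Fields>(), 8);
    }
}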