summaryrefslogtreecommitdiffstats
path: root/vendor/memmap2/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 18:31:44 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 18:31:44 +0000
commitc23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /vendor/memmap2/src
parentReleasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
downloadrustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/memmap2/src')
-rw-r--r--vendor/memmap2/src/advice.rs67
-rw-r--r--vendor/memmap2/src/lib.rs280
-rw-r--r--vendor/memmap2/src/unix.rs167
-rw-r--r--vendor/memmap2/src/windows.rs7
4 files changed, 483 insertions, 38 deletions
diff --git a/vendor/memmap2/src/advice.rs b/vendor/memmap2/src/advice.rs
index 185743e07..0181615ef 100644
--- a/vendor/memmap2/src/advice.rs
+++ b/vendor/memmap2/src/advice.rs
@@ -241,6 +241,73 @@ pub enum Advice {
#[cfg(target_os = "linux")]
HwPoison = libc::MADV_HWPOISON,
+ /// **MADV_POPULATE_READ** - Linux only (since Linux 5.14)
+ ///
+ /// Populate (prefault) page tables readable, faulting in all
+ /// pages in the range just as if manually reading from each
+ /// page; however, avoid the actual memory access that would have
+ /// been performed after handling the fault.
+ ///
+ /// In contrast to MAP_POPULATE, MADV_POPULATE_READ does not hide
+ /// errors, can be applied to (parts of) existing mappings and
+ /// will always populate (prefault) page tables readable. One
+ /// example use case is prefaulting a file mapping, reading all
+ /// file content from disk; however, pages won't be dirtied and
+ /// consequently won't have to be written back to disk when
+ /// evicting the pages from memory.
+ ///
+ /// Depending on the underlying mapping, map the shared zeropage,
+ /// preallocate memory or read the underlying file; files with
+ /// holes might or might not preallocate blocks. If populating
+ /// fails, a SIGBUS signal is not generated; instead, an error is
+ /// returned.
+ ///
+ /// If MADV_POPULATE_READ succeeds, all page tables have been
+ /// populated (prefaulted) readable once. If MADV_POPULATE_READ
+ /// fails, some page tables might have been populated.
+ ///
+ /// MADV_POPULATE_READ cannot be applied to mappings without read
+ /// permissions and special mappings, for example, mappings
+ /// marked with kernel-internal flags such as VM_PFNMAP or VM_IO,
+ /// or secret memory regions created using memfd_secret(2).
+ ///
+ /// Note that with MADV_POPULATE_READ, the process can be killed
+ /// at any moment when the system runs out of memory.
+ #[cfg(target_os = "linux")]
+ PopulateRead = libc::MADV_POPULATE_READ,
+
+ /// **MADV_POPULATE_WRITE** - Linux only (since Linux 5.14)
+ ///
+ /// Populate (prefault) page tables writable, faulting in all
+ /// pages in the range just as if manually writing to each
+ /// page; however, avoid the actual memory access that would have
+ /// been performed after handling the fault.
+ ///
+ /// In contrast to MAP_POPULATE, MADV_POPULATE_WRITE does not
+ /// hide errors, can be applied to (parts of) existing mappings
+ /// and will always populate (prefault) page tables writable.
+ /// One example use case is preallocating memory, breaking any
+ /// CoW (Copy on Write).
+ ///
+ /// Depending on the underlying mapping, preallocate memory or
+ /// read the underlying file; files with holes will preallocate
+ /// blocks. If populating fails, a SIGBUS signal is not
+ /// generated; instead, an error is returned.
+ ///
+ /// If MADV_POPULATE_WRITE succeeds, all page tables have been
+ /// populated (prefaulted) writable once. If MADV_POPULATE_WRITE
+ /// fails, some page tables might have been populated.
+ ///
+ /// MADV_POPULATE_WRITE cannot be applied to mappings without
+ /// write permissions and special mappings, for example, mappings
+ /// marked with kernel-internal flags such as VM_PFNMAP or VM_IO,
+ /// or secret memory regions created using memfd_secret(2).
+ ///
+ /// Note that with MADV_POPULATE_WRITE, the process can be killed
+ /// at any moment when the system runs out of memory.
+ #[cfg(target_os = "linux")]
+ PopulateWrite = libc::MADV_POPULATE_WRITE,
+
/// **MADV_ZERO_WIRED_PAGES** - Darwin only
///
/// Indicates that the application would like the wired pages in this address range to be
diff --git a/vendor/memmap2/src/lib.rs b/vendor/memmap2/src/lib.rs
index 2d730ae90..dd99ba12e 100644
--- a/vendor/memmap2/src/lib.rs
+++ b/vendor/memmap2/src/lib.rs
@@ -4,7 +4,7 @@
//! which correspond to mapping a [`File`] to a [`&[u8]`](https://doc.rust-lang.org/std/primitive.slice.html)
//! or [`&mut [u8]`](https://doc.rust-lang.org/std/primitive.slice.html)
//! respectively. Both function by dereferencing to a slice, allowing the
-//! [`Mmap`]/[`MmapMut`] to be used in the same way you would the equivelant slice
+//! [`Mmap`]/[`MmapMut`] to be used in the same way you would the equivalent slice
//! types.
//!
//! [`File`]: std::fs::File
@@ -497,6 +497,21 @@ impl MmapOptions {
MmapInner::map_mut(self.get_len(&file)?, desc.0, self.offset, self.populate)
.map(|inner| MmapRaw { inner })
}
+
+ /// Creates a read-only raw memory map
+ ///
+ /// This is primarily useful to avoid intermediate `Mmap` instances when
+ /// read-only access to files modified elsewhere are required.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails
+ pub fn map_raw_read_only<T: MmapAsRawDesc>(&self, file: T) -> Result<MmapRaw> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| MmapRaw { inner })
+ }
}
/// A handle to an immutable memory mapped buffer.
@@ -646,7 +661,7 @@ impl Mmap {
///
/// See [mlock()](https://man7.org/linux/man-pages/man2/mlock.2.html) man page.
#[cfg(unix)]
- pub fn lock(&mut self) -> Result<()> {
+ pub fn lock(&self) -> Result<()> {
self.inner.lock()
}
@@ -654,9 +669,31 @@ impl Mmap {
///
/// See [munlock()](https://man7.org/linux/man-pages/man2/munlock.2.html) man page.
#[cfg(unix)]
- pub fn unlock(&mut self) -> Result<()> {
+ pub fn unlock(&self) -> Result<()> {
self.inner.unlock()
}
+
+ /// Adjust the size of the memory mapping.
+ ///
+ /// This will try to resize the memory mapping in place. If
+ /// [`RemapOptions::may_move`] is specified it will move the mapping if it
+ /// could not resize in place, otherwise it will error.
+ ///
+ /// Only supported on Linux.
+ ///
+ /// See the [`mremap(2)`] man page.
+ ///
+ /// # Safety
+ ///
+ /// Resizing the memory mapping beyond the end of the mapped file will
+ /// result in UB should you happen to access memory beyond the end of the
+ /// file.
+ ///
+ /// [`mremap(2)`]: https://man7.org/linux/man-pages/man2/mremap.2.html
+ #[cfg(target_os = "linux")]
+ pub unsafe fn remap(&mut self, new_len: usize, options: RemapOptions) -> Result<()> {
+ self.inner.remap(new_len, options)
+ }
}
#[cfg(feature = "stable_deref_trait")]
@@ -837,7 +874,7 @@ impl MmapRaw {
///
/// See [mlock()](https://man7.org/linux/man-pages/man2/mlock.2.html) man page.
#[cfg(unix)]
- pub fn lock(&mut self) -> Result<()> {
+ pub fn lock(&self) -> Result<()> {
self.inner.lock()
}
@@ -845,9 +882,31 @@ impl MmapRaw {
///
/// See [munlock()](https://man7.org/linux/man-pages/man2/munlock.2.html) man page.
#[cfg(unix)]
- pub fn unlock(&mut self) -> Result<()> {
+ pub fn unlock(&self) -> Result<()> {
self.inner.unlock()
}
+
+ /// Adjust the size of the memory mapping.
+ ///
+ /// This will try to resize the memory mapping in place. If
+ /// [`RemapOptions::may_move`] is specified it will move the mapping if it
+ /// could not resize in place, otherwise it will error.
+ ///
+ /// Only supported on Linux.
+ ///
+ /// See the [`mremap(2)`] man page.
+ ///
+ /// # Safety
+ ///
+ /// Resizing the memory mapping beyond the end of the mapped file will
+ /// result in UB should you happen to access memory beyond the end of the
+ /// file.
+ ///
+ /// [`mremap(2)`]: https://man7.org/linux/man-pages/man2/mremap.2.html
+ #[cfg(target_os = "linux")]
+ pub unsafe fn remap(&mut self, new_len: usize, options: RemapOptions) -> Result<()> {
+ self.inner.remap(new_len, options)
+ }
}
impl fmt::Debug for MmapRaw {
@@ -1105,7 +1164,7 @@ impl MmapMut {
///
/// See [mlock()](https://man7.org/linux/man-pages/man2/mlock.2.html) man page.
#[cfg(unix)]
- pub fn lock(&mut self) -> Result<()> {
+ pub fn lock(&self) -> Result<()> {
self.inner.lock()
}
@@ -1113,9 +1172,31 @@ impl MmapMut {
///
/// See [munlock()](https://man7.org/linux/man-pages/man2/munlock.2.html) man page.
#[cfg(unix)]
- pub fn unlock(&mut self) -> Result<()> {
+ pub fn unlock(&self) -> Result<()> {
self.inner.unlock()
}
+
+ /// Adjust the size of the memory mapping.
+ ///
+ /// This will try to resize the memory mapping in place. If
+ /// [`RemapOptions::may_move`] is specified it will move the mapping if it
+ /// could not resize in place, otherwise it will error.
+ ///
+ /// Only supported on Linux.
+ ///
+ /// See the [`mremap(2)`] man page.
+ ///
+ /// # Safety
+ ///
+ /// Resizing the memory mapping beyond the end of the mapped file will
+ /// result in UB should you happen to access memory beyond the end of the
+ /// file.
+ ///
+ /// [`mremap(2)`]: https://man7.org/linux/man-pages/man2/mremap.2.html
+ #[cfg(target_os = "linux")]
+ pub unsafe fn remap(&mut self, new_len: usize, options: RemapOptions) -> Result<()> {
+ self.inner.remap(new_len, options)
+ }
}
#[cfg(feature = "stable_deref_trait")]
@@ -1160,14 +1241,60 @@ impl fmt::Debug for MmapMut {
}
}
+/// Options for [`Mmap::remap`] and [`MmapMut::remap`].
+#[derive(Copy, Clone, Default, Debug)]
+#[cfg(target_os = "linux")]
+pub struct RemapOptions {
+ may_move: bool,
+}
+
+#[cfg(target_os = "linux")]
+impl RemapOptions {
+ /// Creates a new set of options for resizing a memory map.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Controls whether the memory map can be moved if it is not possible to
+ /// resize it in place.
+ ///
+ /// If false then the memory map is guaranteed to remain at the same
+ /// address when being resized but attempting to resize will return an
+ /// error if the new memory map would overlap with something else in the
+ /// current process' memory.
+ ///
+ /// By default this is false.
+ ///
+ /// # `may_move` and `StableDeref`
+ /// If the `stable_deref_trait` feature is enabled then [`Mmap`] and
+ /// [`MmapMut`] implement `StableDeref`. `StableDeref` promises that the
+ /// memory map dereferences to a fixed address, however, calling `remap`
+ /// with `may_move` set may result in the backing memory of the mapping
+ /// being moved to a new address. This may cause UB in other code
+ /// depending on the `StableDeref` guarantees.
+ pub fn may_move(mut self, may_move: bool) -> Self {
+ self.may_move = may_move;
+ self
+ }
+
+ pub(crate) fn into_flags(self) -> libc::c_int {
+ if self.may_move {
+ libc::MREMAP_MAYMOVE
+ } else {
+ 0
+ }
+ }
+}
+
#[cfg(test)]
mod test {
extern crate tempfile;
#[cfg(unix)]
use crate::advice::Advice;
- use std::fs::OpenOptions;
+ use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
+ use std::mem;
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
#[cfg(windows)]
@@ -1257,8 +1384,10 @@ mod test {
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
assert!(mmap.is_empty());
+ assert_eq!(mmap.as_ptr().align_offset(mem::size_of::<usize>()), 0);
let mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
assert!(mmap.is_empty());
+ assert_eq!(mmap.as_ptr().align_offset(mem::size_of::<usize>()), 0);
}
#[test]
@@ -1482,7 +1611,7 @@ mod test {
let mmap = mmap.make_exec().expect("make_exec");
- let jitfn: extern "C" fn() -> u8 = unsafe { std::mem::transmute(mmap.as_ptr()) };
+ let jitfn: extern "C" fn() -> u8 = unsafe { mem::transmute(mmap.as_ptr()) };
assert_eq!(jitfn(), 0xab);
}
@@ -1633,6 +1762,22 @@ mod test {
assert_eq!(unsafe { std::ptr::read(mmap.as_ptr()) }, b'a');
}
+ #[test]
+ fn raw_read_only() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmaprawro");
+
+ File::create(&path).unwrap().write_all(b"abc123").unwrap();
+
+ let mmap = MmapOptions::new()
+ .map_raw_read_only(&File::open(&path).unwrap())
+ .unwrap();
+
+ assert_eq!(mmap.len(), 6);
+ assert!(!mmap.as_ptr().is_null());
+ assert_eq!(unsafe { std::ptr::read(mmap.as_ptr()) }, b'a');
+ }
+
/// Something that relies on StableDeref
#[test]
#[cfg(feature = "stable_deref_trait")]
@@ -1728,7 +1873,7 @@ mod test {
.unwrap();
file.set_len(128).unwrap();
- let mut mmap = unsafe { Mmap::map(&file).unwrap() };
+ let mmap = unsafe { Mmap::map(&file).unwrap() };
#[cfg(target_os = "linux")]
assert!(!is_locked());
@@ -1751,4 +1896,119 @@ mod test {
#[cfg(target_os = "linux")]
assert!(!is_locked());
}
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn remap_grow() {
+ use crate::RemapOptions;
+
+ let initial_len = 128;
+ let final_len = 2000;
+
+ let zeros = vec![0u8; final_len];
+ let incr: Vec<u8> = (0..final_len).map(|v| v as u8).collect();
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(final_len as u64).unwrap();
+
+ let mut mmap = unsafe { MmapOptions::new().len(initial_len).map_mut(&file).unwrap() };
+ assert_eq!(mmap.len(), initial_len);
+ assert_eq!(&mmap[..], &zeros[..initial_len]);
+
+ unsafe {
+ mmap.remap(final_len, RemapOptions::new().may_move(true))
+ .unwrap()
+ };
+
+ // The size should have been updated
+ assert_eq!(mmap.len(), final_len);
+
+ // Should still be all zeros
+ assert_eq!(&mmap[..], &zeros);
+
+ // Write out to the whole expanded slice.
+ mmap.copy_from_slice(&incr);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn remap_shrink() {
+ use crate::RemapOptions;
+
+ let initial_len = 20000;
+ let final_len = 400;
+
+ let incr: Vec<u8> = (0..final_len).map(|v| v as u8).collect();
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(initial_len as u64).unwrap();
+
+ let mut mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
+ assert_eq!(mmap.len(), initial_len);
+
+ unsafe { mmap.remap(final_len, RemapOptions::new()).unwrap() };
+ assert_eq!(mmap.len(), final_len);
+
+ // Check that the mmap is still writable along the slice length
+ mmap.copy_from_slice(&incr);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ #[cfg(target_pointer_width = "32")]
+ fn remap_len_overflow() {
+ use crate::RemapOptions;
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(1024).unwrap();
+ let mut mmap = unsafe { MmapOptions::new().len(1024).map(&file).unwrap() };
+
+ let res = unsafe { mmap.remap(0x80000000, RemapOptions::new().may_move(true)) };
+ assert_eq!(
+ res.unwrap_err().to_string(),
+ "memory map length overflows isize"
+ );
+
+ assert_eq!(mmap.len(), 1024);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn remap_with_offset() {
+ use crate::RemapOptions;
+
+ let offset = 77;
+ let initial_len = 128;
+ let final_len = 2000;
+
+ let zeros = vec![0u8; final_len];
+ let incr: Vec<u8> = (0..final_len).map(|v| v as u8).collect();
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(final_len as u64 + offset).unwrap();
+
+ let mut mmap = unsafe {
+ MmapOptions::new()
+ .len(initial_len)
+ .offset(offset)
+ .map_mut(&file)
+ .unwrap()
+ };
+ assert_eq!(mmap.len(), initial_len);
+ assert_eq!(&mmap[..], &zeros[..initial_len]);
+
+ unsafe {
+ mmap.remap(final_len, RemapOptions::new().may_move(true))
+ .unwrap()
+ };
+
+ // The size should have been updated
+ assert_eq!(mmap.len(), final_len);
+
+ // Should still be all zeros
+ assert_eq!(&mmap[..], &zeros);
+
+ // Write out to the whole expanded slice.
+ mmap.copy_from_slice(&incr);
+ }
}
diff --git a/vendor/memmap2/src/unix.rs b/vendor/memmap2/src/unix.rs
index 221d3ba84..faa3b36d3 100644
--- a/vendor/memmap2/src/unix.rs
+++ b/vendor/memmap2/src/unix.rs
@@ -28,6 +28,18 @@ const MAP_POPULATE: libc::c_int = libc::MAP_POPULATE;
#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_POPULATE: libc::c_int = 0;
+#[cfg(any(
+ target_os = "android",
+ all(target_os = "linux", not(target_env = "musl"))
+))]
+use libc::{mmap64 as mmap, off64_t as off_t};
+
+#[cfg(not(any(
+ target_os = "android",
+ all(target_os = "linux", not(target_env = "musl"))
+)))]
+use libc::{mmap, off_t};
+
pub struct MmapInner {
ptr: *mut libc::c_void,
len: usize,
@@ -46,7 +58,48 @@ impl MmapInner {
) -> io::Result<MmapInner> {
let alignment = offset % page_size() as u64;
let aligned_offset = offset - alignment;
- let aligned_len = len + alignment as usize;
+
+ let (map_len, map_offset) = Self::adjust_mmap_params(len as usize, alignment as usize)?;
+
+ unsafe {
+ let ptr = mmap(
+ ptr::null_mut(),
+ map_len as libc::size_t,
+ prot,
+ flags,
+ file,
+ aligned_offset as off_t,
+ );
+
+ if ptr == libc::MAP_FAILED {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(Self::from_raw_parts(ptr, len, map_offset))
+ }
+ }
+ }
+
+ fn adjust_mmap_params(len: usize, alignment: usize) -> io::Result<(usize, usize)> {
+ use std::isize;
+
+ // Rust's slice cannot be larger than isize::MAX.
+ // See https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html
+ //
+ // This is not a problem on 64-bit targets, but on 32-bit one
+ // having a file or an anonymous mapping larger than 2GB is quite normal
+ // and we have to prevent it.
+ //
+ // The code below is essentially the same as in Rust's std:
+ // https://github.com/rust-lang/rust/blob/db78ab70a88a0a5e89031d7ee4eccec835dcdbde/library/alloc/src/raw_vec.rs#L495
+ if std::mem::size_of::<usize>() < 8 && len > isize::MAX as usize {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "memory map length overflows isize",
+ ));
+ }
+
+ let map_len = len + alignment;
+ let map_offset = alignment;
// `libc::mmap` does not support zero-size mappings. POSIX defines:
//
@@ -54,7 +107,7 @@ impl MmapInner {
// > If `len` is zero, `mmap()` shall fail and no mapping shall be established.
//
// So if we would create such a mapping, create a one-byte mapping instead:
- let aligned_len = aligned_len.max(1);
+ let map_len = map_len.max(1);
// Note that in that case `MmapInner::len` is still set to zero,
// and `Mmap` will still dereferences to an empty slice.
@@ -79,25 +132,73 @@ impl MmapInner {
//
// (SIGBUS is still possible by mapping a non-empty file and then truncating it
// to a shorter size, but that is unrelated to this handling of empty files.)
+ Ok((map_len, map_offset))
+ }
- unsafe {
- let ptr = libc::mmap(
- ptr::null_mut(),
- aligned_len as libc::size_t,
- prot,
- flags,
- file,
- aligned_offset as libc::off_t,
- );
+ /// Get the current memory mapping as a `(ptr, map_len, offset)` tuple.
+ ///
+ /// Note that `map_len` is the length of the memory mapping itself and
+ /// _not_ the one that would be passed to `from_raw_parts`.
+ fn as_mmap_params(&self) -> (*mut libc::c_void, usize, usize) {
+ let offset = self.ptr as usize % page_size();
+ let len = self.len + offset;
+
+ // There are two possible memory layouts we could have, depending on
+ // the length and offset passed when constructing this instance:
+ //
+ // 1. The "normal" memory layout looks like this:
+ //
+ // |<------------------>|<---------------------->|
+ // mmap ptr offset ptr public slice
+ //
+ // That is, we have
+ // - The start of the page-aligned memory mapping returned by mmap,
+ // followed by,
+ // - Some number of bytes that are memory mapped but ignored since
+ // they are before the byte offset requested by the user, followed
+ // by,
+ // - The actual memory mapped slice requested by the user.
+ //
+ // This maps cleanly to a (ptr, len, offset) tuple.
+ //
+ // 2. Then, we have the case where the user requested a zero-length
+ // memory mapping. mmap(2) does not support zero-length mappings so
+ // this crate works around that by actually making a mapping of
+ // length one. This means that we have
+ // - A length zero slice, followed by,
+ // - A single memory mapped byte
+ //
+ // Note that this only happens if the offset within the page is also
+ // zero. Otherwise, we have a memory map of offset bytes and not a
+ // zero-length memory map.
+ //
+ // This doesn't fit cleanly into a (ptr, len, offset) tuple. Instead,
+ // we fudge it slightly: a zero-length memory map turns into a
+ // mapping of length one and can't be told apart outside of this
+ // method without knowing the original length.
+ if len == 0 {
+ (self.ptr, 1, 0)
+ } else {
+ (unsafe { self.ptr.offset(-(offset as isize)) }, len, offset)
+ }
+ }
- if ptr == libc::MAP_FAILED {
- Err(io::Error::last_os_error())
- } else {
- Ok(MmapInner {
- ptr: ptr.offset(alignment as isize),
- len,
- })
- }
+ /// Construct this `MmapInner` from its raw components
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must point to the start of memory mapping that can be freed
+ /// using `munmap(2)` (i.e. returned by `mmap(2)` or `mremap(2)`)
+ /// - The memory mapping at `ptr` must have a length of `len + offset`.
+ /// - If `len + offset == 0` then the memory mapping must be of length 1.
+ /// - `offset` must be less than the current page size.
+ unsafe fn from_raw_parts(ptr: *mut libc::c_void, len: usize, offset: usize) -> Self {
+ debug_assert_eq!(ptr as usize % page_size(), 0, "ptr not page-aligned");
+ debug_assert!(offset < page_size(), "offset larger than page size");
+
+ Self {
+ ptr: ptr.offset(offset as isize),
+ len,
}
}
@@ -254,6 +355,24 @@ impl MmapInner {
}
}
+ #[cfg(target_os = "linux")]
+ pub fn remap(&mut self, new_len: usize, options: crate::RemapOptions) -> io::Result<()> {
+ let (old_ptr, old_len, offset) = self.as_mmap_params();
+ let (map_len, offset) = Self::adjust_mmap_params(new_len, offset)?;
+
+ unsafe {
+ let new_ptr = libc::mremap(old_ptr, old_len, map_len, options.into_flags());
+
+ if new_ptr == libc::MAP_FAILED {
+ Err(io::Error::last_os_error())
+ } else {
+ // We explicitly don't drop self since the pointer within is no longer valid.
+ ptr::write(self, Self::from_raw_parts(new_ptr, new_len, offset));
+ Ok(())
+ }
+ }
+ }
+
pub fn lock(&self) -> io::Result<()> {
unsafe {
if libc::mlock(self.ptr, self.len) != 0 {
@@ -277,16 +396,12 @@ impl MmapInner {
impl Drop for MmapInner {
fn drop(&mut self) {
- let alignment = self.ptr as usize % page_size();
- let len = self.len + alignment;
- let len = len.max(1);
+ let (ptr, len, _) = self.as_mmap_params();
+
// Any errors during unmapping/closing are ignored as the only way
// to report them would be through panicking which is highly discouraged
// in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
- unsafe {
- let ptr = self.ptr.offset(-(alignment as isize));
- libc::munmap(ptr, len as libc::size_t);
- }
+ unsafe { libc::munmap(ptr, len as libc::size_t) };
}
}
diff --git a/vendor/memmap2/src/windows.rs b/vendor/memmap2/src/windows.rs
index c1b1a4d9f..537336904 100644
--- a/vendor/memmap2/src/windows.rs
+++ b/vendor/memmap2/src/windows.rs
@@ -132,9 +132,12 @@ extern "system" {
fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
}
-/// Returns a fixed pointer that is valid for `slice::from_raw_parts::<u8>` with `len == 0`.
+/// Returns a fixed aligned pointer that is valid for `slice::from_raw_parts::<u8>` with `len == 0`.
+///
+/// This aligns the pointer to `allocation_granularity()` or 1 if unknown.
fn empty_slice_ptr() -> *mut c_void {
- std::ptr::NonNull::<u8>::dangling().cast().as_ptr()
+ let align = allocation_granularity().max(1);
+ unsafe { mem::transmute(align) }
}
pub struct MmapInner {