Diffstat (limited to 'third_party/rust/memmap2/src')
-rw-r--r--  third_party/rust/memmap2/src/advice.rs   |  384
-rw-r--r--  third_party/rust/memmap2/src/lib.rs      | 2179
-rw-r--r--  third_party/rust/memmap2/src/stub.rs     |   81
-rw-r--r--  third_party/rust/memmap2/src/unix.rs     |  458
-rw-r--r--  third_party/rust/memmap2/src/windows.rs  |  524
5 files changed, 3626 insertions, 0 deletions
diff --git a/third_party/rust/memmap2/src/advice.rs b/third_party/rust/memmap2/src/advice.rs
new file mode 100644
index 0000000000..4316058fc8
--- /dev/null
+++ b/third_party/rust/memmap2/src/advice.rs
@@ -0,0 +1,384 @@
+/// Values supported by [`Mmap::advise`][crate::Mmap::advise] and [`MmapMut::advise`][crate::MmapMut::advise] functions.
+///
+/// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
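+///
+/// # Example
+///
+/// A minimal sketch of passing one of these values to
+/// [`Mmap::advise`][crate::Mmap::advise], reusing the license file that the
+/// other examples in this crate map:
+///
+/// ```
+/// use memmap2::{Advice, Mmap};
+/// use std::fs::File;
+///
+/// # fn main() -> std::io::Result<()> {
+/// let file = File::open("LICENSE-APACHE")?;
+/// let mmap = unsafe { Mmap::map(&file)? };
+/// // Hint that the mapping will be read from start to finish.
+/// mmap.advise(Advice::Sequential)?;
+/// # Ok(())
+/// # }
+/// ```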
+#[repr(i32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
+pub enum Advice {
+ /// **MADV_NORMAL**
+ ///
+ /// No special treatment. This is the default.
+ Normal = libc::MADV_NORMAL,
+
+ /// **MADV_RANDOM**
+ ///
+ /// Expect page references in random order. (Hence, read
+ /// ahead may be less useful than normally.)
+ Random = libc::MADV_RANDOM,
+
+ /// **MADV_SEQUENTIAL**
+ ///
+ /// Expect page references in sequential order. (Hence, pages
+ /// in the given range can be aggressively read ahead, and may
+ /// be freed soon after they are accessed.)
+ Sequential = libc::MADV_SEQUENTIAL,
+
+ /// **MADV_WILLNEED**
+ ///
+ /// Expect access in the near future. (Hence, it might be a
+ /// good idea to read some pages ahead.)
+ WillNeed = libc::MADV_WILLNEED,
+
+ /// **MADV_DONTFORK** - Linux only (since Linux 2.6.16)
+ ///
+ /// Do not make the pages in this range available to the child
+ /// after a fork(2). This is useful to prevent copy-on-write
+ /// semantics from changing the physical location of a page if
+ /// the parent writes to it after a fork(2). (Such page
+ /// relocations cause problems for hardware that DMAs into the
+ /// page.)
+ #[cfg(target_os = "linux")]
+ DontFork = libc::MADV_DONTFORK,
+
+ /// **MADV_DOFORK** - Linux only (since Linux 2.6.16)
+ ///
+ /// Undo the effect of MADV_DONTFORK, restoring the default
+ /// behavior, whereby a mapping is inherited across fork(2).
+ #[cfg(target_os = "linux")]
+ DoFork = libc::MADV_DOFORK,
+
+ /// **MADV_MERGEABLE** - Linux only (since Linux 2.6.32)
+ ///
+ /// Enable Kernel Samepage Merging (KSM) for the pages in the
+ /// range specified by addr and length. The kernel regularly
+ /// scans those areas of user memory that have been marked as
+ /// mergeable, looking for pages with identical content.
+ /// These are replaced by a single write-protected page (which
+ /// is automatically copied if a process later wants to update
+ /// the content of the page). KSM merges only private
+ /// anonymous pages (see mmap(2)).
+ ///
+ /// The KSM feature is intended for applications that generate
+ /// many instances of the same data (e.g., virtualization
+ /// systems such as KVM). It can consume a lot of processing
+ /// power; use with care. See the Linux kernel source file
+ /// Documentation/admin-guide/mm/ksm.rst for more details.
+ ///
+ /// The MADV_MERGEABLE and MADV_UNMERGEABLE operations are
+ /// available only if the kernel was configured with
+ /// CONFIG_KSM.
+ #[cfg(target_os = "linux")]
+ Mergeable = libc::MADV_MERGEABLE,
+
+ /// **MADV_UNMERGEABLE** - Linux only (since Linux 2.6.32)
+ ///
+ /// Undo the effect of an earlier MADV_MERGEABLE operation on
+ /// the specified address range; KSM unmerges whatever pages
+ /// it had merged in the address range specified by addr and
+ /// length.
+ #[cfg(target_os = "linux")]
+ Unmergeable = libc::MADV_UNMERGEABLE,
+
+ /// **MADV_HUGEPAGE** - Linux only (since Linux 2.6.38)
+ ///
+ /// Enable Transparent Huge Pages (THP) for pages in the range
+ /// specified by addr and length. Currently, Transparent Huge
+ /// Pages work only with private anonymous pages (see
+ /// mmap(2)). The kernel will regularly scan the areas marked
+ /// as huge page candidates to replace them with huge pages.
+ /// The kernel will also allocate huge pages directly when the
+ /// region is naturally aligned to the huge page size (see
+ /// posix_memalign(2)).
+ ///
+ /// This feature is primarily aimed at applications that use
+ /// large mappings of data and access large regions of that
+ /// memory at a time (e.g., virtualization systems such as
+ /// QEMU). It can very easily waste memory (e.g., a 2 MB
+ /// mapping that only ever accesses 1 byte will result in 2 MB
+ /// of wired memory instead of one 4 KB page). See the Linux
+ /// kernel source file
+ /// Documentation/admin-guide/mm/transhuge.rst for more
+ /// details.
+ ///
+ /// Most common kernel configurations provide MADV_HUGEPAGE-
+ /// style behavior by default, and thus MADV_HUGEPAGE is
+ /// normally not necessary. It is mostly intended for
+ /// embedded systems, where MADV_HUGEPAGE-style behavior may
+ /// not be enabled by default in the kernel. On such systems,
+ /// this flag can be used in order to selectively enable THP.
+ /// Whenever MADV_HUGEPAGE is used, it should always be in
+ /// regions of memory with an access pattern that the
+ /// developer knows in advance won't risk increasing the
+ /// memory footprint of the application when transparent
+ /// hugepages are enabled.
+ ///
+ /// The MADV_HUGEPAGE and MADV_NOHUGEPAGE operations are
+ /// available only if the kernel was configured with
+ /// CONFIG_TRANSPARENT_HUGEPAGE.
+ #[cfg(target_os = "linux")]
+ HugePage = libc::MADV_HUGEPAGE,
+
+ /// **MADV_NOHUGEPAGE** - Linux only (since Linux 2.6.38)
+ ///
+ /// Ensures that memory in the address range specified by addr
+ /// and length will not be backed by transparent hugepages.
+ #[cfg(target_os = "linux")]
+ NoHugePage = libc::MADV_NOHUGEPAGE,
+
+ /// **MADV_DONTDUMP** - Linux only (since Linux 3.4)
+ ///
+ /// Exclude from a core dump those pages in the range
+ /// specified by addr and length. This is useful in
+ /// applications that have large areas of memory that are
+ /// known not to be useful in a core dump. The effect of
+ /// **MADV_DONTDUMP** takes precedence over the bit mask that is
+ /// set via the `/proc/[pid]/coredump_filter` file (see
+ /// core(5)).
+ #[cfg(target_os = "linux")]
+ DontDump = libc::MADV_DONTDUMP,
+
+ /// **MADV_DODUMP** - Linux only (since Linux 3.4)
+ ///
+ /// Undo the effect of an earlier MADV_DONTDUMP.
+ #[cfg(target_os = "linux")]
+ DoDump = libc::MADV_DODUMP,
+
+ /// **MADV_HWPOISON** - Linux only (since Linux 2.6.32)
+ ///
+ /// Poison the pages in the range specified by addr and length
+ /// and handle subsequent references to those pages like a
+ /// hardware memory corruption. This operation is available
+ /// only for privileged (CAP_SYS_ADMIN) processes. This
+ /// operation may result in the calling process receiving a
+ /// SIGBUS and the page being unmapped.
+ ///
+ /// This feature is intended for testing of memory error-
+ /// handling code; it is available only if the kernel was
+ /// configured with CONFIG_MEMORY_FAILURE.
+ #[cfg(target_os = "linux")]
+ HwPoison = libc::MADV_HWPOISON,
+
+ /// **MADV_POPULATE_READ** - Linux only (since Linux 5.14)
+ ///
+ /// Populate (prefault) page tables readable, faulting in all
+ /// pages in the range just as if manually reading from each
+ /// page; however, avoid the actual memory access that would have
+ /// been performed after handling the fault.
+ ///
+ /// In contrast to MAP_POPULATE, MADV_POPULATE_READ does not hide
+ /// errors, can be applied to (parts of) existing mappings and
+ /// will always populate (prefault) page tables readable. One
+ /// example use case is prefaulting a file mapping, reading all
+ /// file content from disk; however, pages won't be dirtied and
+ /// consequently won't have to be written back to disk when
+ /// evicting the pages from memory.
+ ///
+ /// Depending on the underlying mapping, map the shared zeropage,
+ /// preallocate memory or read the underlying file; files with
+ /// holes might or might not preallocate blocks. If populating
+ /// fails, a SIGBUS signal is not generated; instead, an error is
+ /// returned.
+ ///
+ /// If MADV_POPULATE_READ succeeds, all page tables have been
+ /// populated (prefaulted) readable once. If MADV_POPULATE_READ
+ /// fails, some page tables might have been populated.
+ ///
+ /// MADV_POPULATE_READ cannot be applied to mappings without read
+ /// permissions and special mappings, for example, mappings
+ /// marked with kernel-internal flags such as VM_PFNMAP or VM_IO,
+ /// or secret memory regions created using memfd_secret(2).
+ ///
+ /// Note that with MADV_POPULATE_READ, the process can be killed
+ /// at any moment when the system runs out of memory.
+ #[cfg(target_os = "linux")]
+ PopulateRead = libc::MADV_POPULATE_READ,
+
+ /// **MADV_POPULATE_WRITE** - Linux only (since Linux 5.14)
+ ///
+ /// Populate (prefault) page tables writable, faulting in all
+ /// pages in the range just as if manually writing to each
+ /// page; however, avoid the actual memory access that would have
+ /// been performed after handling the fault.
+ ///
+ /// In contrast to MAP_POPULATE, MADV_POPULATE_WRITE does not
+ /// hide errors, can be applied to (parts of) existing mappings
+ /// and will always populate (prefault) page tables writable.
+ /// One example use case is preallocating memory, breaking any
+ /// CoW (Copy on Write).
+ ///
+ /// Depending on the underlying mapping, preallocate memory or
+ /// read the underlying file; files with holes will preallocate
+ /// blocks. If populating fails, a SIGBUS signal is not
+ /// generated; instead, an error is returned.
+ ///
+ /// If MADV_POPULATE_WRITE succeeds, all page tables have been
+ /// populated (prefaulted) writable once. If MADV_POPULATE_WRITE
+ /// fails, some page tables might have been populated.
+ ///
+ /// MADV_POPULATE_WRITE cannot be applied to mappings without
+ /// write permissions and special mappings, for example, mappings
+ /// marked with kernel-internal flags such as VM_PFNMAP or VM_IO,
+ /// or secret memory regions created using memfd_secret(2).
+ ///
+ /// Note that with MADV_POPULATE_WRITE, the process can be killed
+ /// at any moment when the system runs out of memory.
+ #[cfg(target_os = "linux")]
+ PopulateWrite = libc::MADV_POPULATE_WRITE,
+
+ /// **MADV_ZERO_WIRED_PAGES** - Darwin only
+ ///
+ /// Indicates that the application would like the wired pages in this address range to be
+ /// zeroed out if the address range is deallocated without first unwiring the pages (i.e.
+ /// a munmap(2) without a preceding munlock(2) or the application quits). This is used
+ /// with the madvise() system call.
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
+ ZeroWiredPages = libc::MADV_ZERO_WIRED_PAGES,
+}
+
+/// Values supported by [`Mmap::unsafe_advise`][crate::Mmap::unsafe_advise] and [`MmapMut::unsafe_advise`][crate::MmapMut::unsafe_advise] functions.
+///
+/// These flags can be passed to the [madvise(2)][man_page] system call
+/// and have effects on the mapped pages which are conceptually writes,
+/// i.e. they change the observable contents of these pages, which
+/// implies undefined behaviour if the mapping is still borrowed.
+///
+/// Hence, these potentially unsafe flags must be used with the unsafe
+/// methods and the programmer has to justify that the code
+/// does not keep any borrows of the mapping active while the mapped pages
+/// are updated by the kernel's memory management subsystem.
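+///
+/// # Example
+///
+/// A minimal sketch, assuming an anonymous map; note that no borrow of the
+/// mapping may be held across the call:
+///
+/// ```
+/// use memmap2::{MmapMut, UncheckedAdvice};
+///
+/// # fn main() -> std::io::Result<()> {
+/// let mmap = MmapMut::map_anon(4096)?;
+/// // Safety: no slice borrowed from `mmap` outlives this call, so the
+/// // kernel may drop these pages and zero-fill them on next access.
+/// unsafe { mmap.unchecked_advise(UncheckedAdvice::DontNeed)? };
+/// # Ok(())
+/// # }
+/// ```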
+///
+/// [man_page]: https://man7.org/linux/man-pages/man2/madvise.2.html
+#[repr(i32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
+pub enum UncheckedAdvice {
+ /// **MADV_DONTNEED**
+ ///
+ /// Do not expect access in the near future. (For the time
+ /// being, the application is finished with the given range,
+ /// so the kernel can free resources associated with it.)
+ ///
+ /// After a successful MADV_DONTNEED operation, the semantics
+ /// of memory access in the specified region are changed:
+ /// subsequent accesses of pages in the range will succeed,
+ /// but will result in either repopulating the memory contents
+ /// from the up-to-date contents of the underlying mapped file
+ /// (for shared file mappings, shared anonymous mappings, and
+ /// shmem-based techniques such as System V shared memory
+ /// segments) or zero-fill-on-demand pages for anonymous
+ /// private mappings.
+ ///
+ /// Note that, when applied to shared mappings, MADV_DONTNEED
+ /// might not lead to immediate freeing of the pages in the
+ /// range. The kernel is free to delay freeing the pages
+ /// until an appropriate moment. The resident set size (RSS)
+ /// of the calling process will be immediately reduced
+ /// however.
+ ///
+ /// **MADV_DONTNEED** cannot be applied to locked pages, Huge TLB
+ /// pages, or VM_PFNMAP pages. (Pages marked with the kernel-
+ /// internal VM_PFNMAP flag are special memory areas that are
+ /// not managed by the virtual memory subsystem. Such pages
+ /// are typically created by device drivers that map the pages
+ /// into user space.)
+ ///
+ /// # Safety
+ ///
+ /// Using this value conceptually writes to the
+ /// mapped pages, i.e. borrowing the mapping while the pages
+ /// are freed results in undefined behaviour.
+ DontNeed = libc::MADV_DONTNEED,
+
+ //
+ // The rest are Linux- or Darwin-specific
+ //
+ /// **MADV_FREE** - Linux (since Linux 4.5) and Darwin
+ ///
+ /// The application no longer requires the pages in the range
+ /// specified by addr and len. The kernel can thus free these
+ /// pages, but the freeing could be delayed until memory
+ /// pressure occurs. For each of the pages that has been
+ /// marked to be freed but has not yet been freed, the free
+ /// operation will be canceled if the caller writes into the
+ /// page. After a successful MADV_FREE operation, any stale
+ /// data (i.e., dirty, unwritten pages) will be lost when the
+ /// kernel frees the pages. However, subsequent writes to
+ /// pages in the range will succeed and then the kernel cannot
+ /// free those dirtied pages, so that the caller can always
+ /// see just written data. If there is no subsequent write,
+ /// the kernel can free the pages at any time. Once pages in
+ /// the range have been freed, the caller will see zero-fill-
+ /// on-demand pages upon subsequent page references.
+ ///
+ /// The MADV_FREE operation can be applied only to private
+ /// anonymous pages (see mmap(2)). In Linux before version
+ /// 4.12, when freeing pages on a swapless system, the pages
+ /// in the given range are freed instantly, regardless of
+ /// memory pressure.
+ ///
+ /// # Safety
+ ///
+ /// Using this value conceptually writes to the
+ /// mapped pages, i.e. borrowing the mapping while the pages
+ /// are still being freed results in undefined behaviour.
+ #[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios"))]
+ Free = libc::MADV_FREE,
+
+ /// **MADV_REMOVE** - Linux only (since Linux 2.6.16)
+ ///
+ /// Free up a given range of pages and its associated backing
+ /// store. This is equivalent to punching a hole in the
+ /// corresponding byte range of the backing store (see
+ /// fallocate(2)). Subsequent accesses in the specified
+ /// address range will see bytes containing zero.
+ ///
+ /// The specified address range must be mapped shared and
+ /// writable. This flag cannot be applied to locked pages,
+ /// Huge TLB pages, or VM_PFNMAP pages.
+ ///
+ /// In the initial implementation, only tmpfs(5) supported
+ /// **MADV_REMOVE**; but since Linux 3.5, any filesystem which
+ /// supports the fallocate(2) FALLOC_FL_PUNCH_HOLE mode also
+ /// supports MADV_REMOVE. Hugetlbfs fails with the error
+ /// EINVAL and other filesystems fail with the error
+ /// EOPNOTSUPP.
+ ///
+ /// # Safety
+ ///
+ /// Using this value conceptually writes to the
+ /// mapped pages, i.e. borrowing the mapping while the pages
+ /// are freed results in undefined behaviour.
+ #[cfg(target_os = "linux")]
+ Remove = libc::MADV_REMOVE,
+
+ /// **MADV_FREE_REUSABLE** - Darwin only
+ ///
+ /// Behaves like **MADV_FREE**, but the freed pages are accounted for in the RSS of the process.
+ ///
+ /// # Safety
+ ///
+ /// Using this value conceptually writes to the
+ /// mapped pages, i.e. borrowing the mapping while the pages
+ /// are still being freed results in undefined behaviour.
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
+ FreeReusable = libc::MADV_FREE_REUSABLE,
+
+ /// **MADV_FREE_REUSE** - Darwin only
+ ///
+ /// Marks a memory region previously freed by **MADV_FREE_REUSABLE** as non-reusable, accounts
+ /// for the pages in the RSS of the process. Pages that have been freed will be replaced by
+ /// zero-filled pages on demand, other pages will be left as is.
+ ///
+ /// # Safety
+ ///
+ /// Using this value conceptually writes to the
+ /// mapped pages, i.e. borrowing the mapping while the pages
+ /// are still being freed results in undefined behaviour.
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
+ FreeReuse = libc::MADV_FREE_REUSE,
+}
+
+// Future expansion:
+// MADV_SOFT_OFFLINE (since Linux 2.6.33)
+// MADV_WIPEONFORK (since Linux 4.14)
+// MADV_KEEPONFORK (since Linux 4.14)
+// MADV_COLD (since Linux 5.4)
+// MADV_PAGEOUT (since Linux 5.4)
diff --git a/third_party/rust/memmap2/src/lib.rs b/third_party/rust/memmap2/src/lib.rs
new file mode 100644
index 0000000000..fb912e6d03
--- /dev/null
+++ b/third_party/rust/memmap2/src/lib.rs
@@ -0,0 +1,2179 @@
+//! A cross-platform Rust API for memory mapped buffers.
+//!
+//! The core functionality is provided by either [`Mmap`] or [`MmapMut`],
+//! which correspond to mapping a [`File`] to a [`&[u8]`](https://doc.rust-lang.org/std/primitive.slice.html)
+//! or [`&mut [u8]`](https://doc.rust-lang.org/std/primitive.slice.html)
+//! respectively. Both function by dereferencing to a slice, allowing the
+//! [`Mmap`]/[`MmapMut`] to be used in the same way you would the equivalent slice
+//! types.
+//!
+//! [`File`]: std::fs::File
+//!
+//! # Examples
+//!
+//! For simple cases [`Mmap`] can be used directly:
+//!
+//! ```
+//! use std::fs::File;
+//! use std::io::Read;
+//!
+//! use memmap2::Mmap;
+//!
+//! # fn main() -> std::io::Result<()> {
+//! let mut file = File::open("LICENSE-APACHE")?;
+//!
+//! let mut contents = Vec::new();
+//! file.read_to_end(&mut contents)?;
+//!
+//! let mmap = unsafe { Mmap::map(&file)? };
+//!
+//! assert_eq!(&contents[..], &mmap[..]);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! However, for cases which require configuration of the mapping, you
+//! can use [`MmapOptions`] in order to further configure a mapping
+//! before you create it.
+
+#![allow(clippy::len_without_is_empty, clippy::missing_safety_doc)]
+
+#[cfg_attr(unix, path = "unix.rs")]
+#[cfg_attr(windows, path = "windows.rs")]
+#[cfg_attr(not(any(unix, windows)), path = "stub.rs")]
+mod os;
+use crate::os::{file_len, MmapInner};
+
+#[cfg(unix)]
+mod advice;
+#[cfg(unix)]
+pub use crate::advice::{Advice, UncheckedAdvice};
+
+use std::fmt;
+#[cfg(not(any(unix, windows)))]
+use std::fs::File;
+use std::io::{Error, ErrorKind, Result};
+use std::isize;
+use std::mem;
+use std::ops::{Deref, DerefMut};
+#[cfg(unix)]
+use std::os::unix::io::{AsRawFd, RawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawHandle, RawHandle};
+use std::slice;
+
+#[cfg(not(any(unix, windows)))]
+pub struct MmapRawDescriptor<'a>(&'a File);
+
+#[cfg(unix)]
+pub struct MmapRawDescriptor(RawFd);
+
+#[cfg(windows)]
+pub struct MmapRawDescriptor(RawHandle);
+
+pub trait MmapAsRawDesc {
+ fn as_raw_desc(&self) -> MmapRawDescriptor;
+}
+
+#[cfg(not(any(unix, windows)))]
+impl MmapAsRawDesc for &File {
+ fn as_raw_desc(&self) -> MmapRawDescriptor {
+ MmapRawDescriptor(self)
+ }
+}
+
+#[cfg(unix)]
+impl MmapAsRawDesc for RawFd {
+ fn as_raw_desc(&self) -> MmapRawDescriptor {
+ MmapRawDescriptor(*self)
+ }
+}
+
+#[cfg(unix)]
+impl<'a, T> MmapAsRawDesc for &'a T
+where
+ T: AsRawFd,
+{
+ fn as_raw_desc(&self) -> MmapRawDescriptor {
+ MmapRawDescriptor(self.as_raw_fd())
+ }
+}
+
+#[cfg(windows)]
+impl MmapAsRawDesc for RawHandle {
+ fn as_raw_desc(&self) -> MmapRawDescriptor {
+ MmapRawDescriptor(*self)
+ }
+}
+
+#[cfg(windows)]
+impl<'a, T> MmapAsRawDesc for &'a T
+where
+ T: AsRawHandle,
+{
+ fn as_raw_desc(&self) -> MmapRawDescriptor {
+ MmapRawDescriptor(self.as_raw_handle())
+ }
+}
+
+/// A memory map builder, providing advanced options and flags for specifying memory map behavior.
+///
+/// `MmapOptions` can be used to create an anonymous memory map using [`map_anon()`], or a
+/// file-backed memory map using one of [`map()`], [`map_mut()`], [`map_exec()`],
+/// [`map_copy()`], or [`map_copy_read_only()`].
+///
+/// ## Safety
+///
+/// All file-backed memory map constructors are marked `unsafe` because of the potential for
+/// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or
+/// out of process. Applications must consider the risk and take appropriate precautions when
+/// using file-backed maps. Solutions such as file permissions, locks or process-private (e.g.
+/// unlinked) files exist but are platform specific and limited.
+///
+/// [`map_anon()`]: MmapOptions::map_anon()
+/// [`map()`]: MmapOptions::map()
+/// [`map_mut()`]: MmapOptions::map_mut()
+/// [`map_exec()`]: MmapOptions::map_exec()
+/// [`map_copy()`]: MmapOptions::map_copy()
+/// [`map_copy_read_only()`]: MmapOptions::map_copy_read_only()
+#[derive(Clone, Debug, Default)]
+pub struct MmapOptions {
+ offset: u64,
+ len: Option<usize>,
+ huge: Option<u8>,
+ stack: bool,
+ populate: bool,
+}
+
+impl MmapOptions {
+ /// Creates a new set of options for configuring and creating a memory map.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::{MmapMut, MmapOptions};
+ /// # use std::io::Result;
+ ///
+ /// # fn main() -> Result<()> {
+ /// // Create a new memory map builder.
+ /// let mut mmap_options = MmapOptions::new();
+ ///
+ /// // Configure the memory map builder using option setters, then create
+ /// // a memory map using one of `mmap_options.map_anon`, `mmap_options.map`,
+ /// // `mmap_options.map_mut`, `mmap_options.map_exec`, or `mmap_options.map_copy`:
+ /// let mut mmap: MmapMut = mmap_options.len(36).map_anon()?;
+ ///
+ /// // Use the memory map:
+ /// mmap.copy_from_slice(b"...data to copy to the memory map...");
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn new() -> MmapOptions {
+ MmapOptions::default()
+ }
+
+ /// Configures the memory map to start at byte `offset` from the beginning of the file.
+ ///
+ /// This option has no effect on anonymous memory maps.
+ ///
+ /// By default, the offset is 0.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mmap = unsafe {
+ /// MmapOptions::new()
+ /// .offset(30)
+ /// .map(&File::open("LICENSE-APACHE")?)?
+ /// };
+ /// assert_eq!(&b"Apache License"[..],
+ /// &mmap[..14]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn offset(&mut self, offset: u64) -> &mut Self {
+ self.offset = offset;
+ self
+ }
+
+ /// Configures the created memory mapped buffer to be `len` bytes long.
+ ///
+ /// This option is mandatory for anonymous memory maps.
+ ///
+ /// For file-backed memory maps, the length will default to the file length.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mmap = unsafe {
+ /// MmapOptions::new()
+ /// .len(9)
+ /// .map(&File::open("README.md")?)?
+ /// };
+ /// assert_eq!(&b"# memmap2"[..], &mmap[..]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn len(&mut self, len: usize) -> &mut Self {
+ self.len = Some(len);
+ self
+ }
+
+ /// Returns the configured length, or the length of the provided file.
+ fn get_len<T: MmapAsRawDesc>(&self, file: &T) -> Result<usize> {
+ self.len.map(Ok).unwrap_or_else(|| {
+ let desc = file.as_raw_desc();
+ let file_len = file_len(desc.0)?;
+
+ if file_len < self.offset {
+ return Err(Error::new(
+ ErrorKind::InvalidData,
+ "memory map offset is larger than length",
+ ));
+ }
+ let len = file_len - self.offset;
+
+ // Rust's slice cannot be larger than isize::MAX.
+ // See https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html
+ //
+ // This is not a problem on 64-bit targets, but on 32-bit one
+ // having a file or an anonymous mapping larger than 2GB is quite normal
+ // and we have to prevent it.
+ //
+ // The code below is essentially the same as in Rust's std:
+ // https://github.com/rust-lang/rust/blob/db78ab70a88a0a5e89031d7ee4eccec835dcdbde/library/alloc/src/raw_vec.rs#L495
+ if mem::size_of::<usize>() < 8 && len > isize::MAX as u64 {
+ return Err(Error::new(
+ ErrorKind::InvalidData,
+ "memory map length overflows isize",
+ ));
+ }
+
+ Ok(len as usize)
+ })
+ }
+
+ /// Configures the anonymous memory map to be suitable for a process or thread stack.
+ ///
+ /// This option corresponds to the `MAP_STACK` flag on Linux. It has no effect on Windows.
+ ///
+ /// This option has no effect on file-backed memory maps.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let stack = MmapOptions::new().stack().len(4096).map_anon();
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn stack(&mut self) -> &mut Self {
+ self.stack = true;
+ self
+ }
+
+ /// Configures the anonymous memory map to be allocated using huge pages.
+ ///
+ /// This option corresponds to the `MAP_HUGETLB` flag on Linux. It has no effect on Windows.
+ ///
+ /// The size of the requested page can be specified in page bits. If not provided, the system
+ /// default is requested. The requested length should be a multiple of this, or the mapping
+ /// will fail.
+ ///
+ /// This option has no effect on file-backed memory maps.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let huge = MmapOptions::new().huge(Some(21)).len(2*1024*1024).map_anon();
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn huge(&mut self, page_bits: Option<u8>) -> &mut Self {
+ self.huge = Some(page_bits.unwrap_or(0));
+ self
+ }
+
+ /// Populate (prefault) page tables for a mapping.
+ ///
+ /// For a file mapping, this causes read-ahead on the file. This will help to reduce blocking on page faults later.
+ ///
+ /// This option corresponds to the `MAP_POPULATE` flag on Linux. It has no effect on Windows.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let file = File::open("LICENSE-MIT")?;
+ ///
+ /// let mmap = unsafe {
+ /// MmapOptions::new().populate().map(&file)?
+ /// };
+ ///
+ /// assert_eq!(&b"Copyright"[..], &mmap[..9]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn populate(&mut self) -> &mut Self {
+ self.populate = true;
+ self
+ }
+
+ /// Creates a read-only memory map backed by a file.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ /// use std::io::Read;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mut file = File::open("LICENSE-APACHE")?;
+ ///
+ /// let mut contents = Vec::new();
+ /// file.read_to_end(&mut contents)?;
+ ///
+ /// let mmap = unsafe {
+ /// MmapOptions::new().map(&file)?
+ /// };
+ ///
+ /// assert_eq!(&contents[..], &mmap[..]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub unsafe fn map<T: MmapAsRawDesc>(&self, file: T) -> Result<Mmap> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| Mmap { inner })
+ }
+
+ /// Creates a readable and executable memory map backed by a file.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read permissions.
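+ ///
+ /// # Example
+ ///
+ /// A sketch only; the path here is hypothetical, and filesystems mounted
+ /// `noexec` will refuse the mapping:
+ ///
+ /// ```no_run
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let file = File::open("/path/to/native/code")?;
+ /// // Map the file readable and executable, but not writable.
+ /// let _mmap = unsafe { MmapOptions::new().map_exec(&file)? };
+ /// # Ok(())
+ /// # }
+ /// ```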
+ pub unsafe fn map_exec<T: MmapAsRawDesc>(&self, file: T) -> Result<Mmap> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map_exec(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| Mmap { inner })
+ }
+
+ /// Creates a writeable memory map backed by a file.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read and write permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # extern crate memmap2;
+ /// # extern crate tempfile;
+ /// #
+ /// use std::fs::OpenOptions;
+ /// use std::path::PathBuf;
+ ///
+ /// use memmap2::MmapOptions;
+ /// #
+ /// # fn main() -> std::io::Result<()> {
+ /// # let tempdir = tempfile::tempdir()?;
+ /// let path: PathBuf = /* path to file */
+ /// # tempdir.path().join("map_mut");
+ /// let file = OpenOptions::new().read(true).write(true).create(true).open(&path)?;
+ /// file.set_len(13)?;
+ ///
+ /// let mut mmap = unsafe {
+ /// MmapOptions::new().map_mut(&file)?
+ /// };
+ ///
+ /// mmap.copy_from_slice(b"Hello, world!");
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub unsafe fn map_mut<T: MmapAsRawDesc>(&self, file: T) -> Result<MmapMut> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map_mut(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| MmapMut { inner })
+ }
+
+ /// Creates a copy-on-write memory map backed by a file.
+ ///
+ /// Data written to the memory map will not be visible by other processes,
+ /// and will not be carried through to the underlying file.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with writable permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ /// use std::io::Write;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let file = File::open("LICENSE-APACHE")?;
+ /// let mut mmap = unsafe { MmapOptions::new().map_copy(&file)? };
+ /// (&mut mmap[..]).write_all(b"Hello, world!")?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub unsafe fn map_copy<T: MmapAsRawDesc>(&self, file: T) -> Result<MmapMut> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map_copy(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| MmapMut { inner })
+ }
+
+ /// Creates a copy-on-write read-only memory map backed by a file.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ /// use std::io::Read;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mut file = File::open("README.md")?;
+ ///
+ /// let mut contents = Vec::new();
+ /// file.read_to_end(&mut contents)?;
+ ///
+ /// let mmap = unsafe {
+ /// MmapOptions::new().map_copy_read_only(&file)?
+ /// };
+ ///
+ /// assert_eq!(&contents[..], &mmap[..]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub unsafe fn map_copy_read_only<T: MmapAsRawDesc>(&self, file: T) -> Result<Mmap> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map_copy_read_only(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| Mmap { inner })
+ }
+
+ /// Creates an anonymous memory map.
+ ///
+ /// The memory map length should be configured using [`MmapOptions::len()`]
+ /// before creating an anonymous memory map, otherwise a zero-length mapping
+ /// will be created.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails or
+ /// when `len > isize::MAX`.
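+ ///
+ /// # Example
+ ///
+ /// A minimal sketch using an anonymous map as a scratch buffer:
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mut mmap = MmapOptions::new().len(4096).map_anon()?;
+ /// mmap[..5].copy_from_slice(b"hello");
+ /// assert_eq!(&mmap[..5], b"hello");
+ /// # Ok(())
+ /// # }
+ /// ```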
+ pub fn map_anon(&self) -> Result<MmapMut> {
+ let len = self.len.unwrap_or(0);
+
+ // See get_len() for details.
+ if mem::size_of::<usize>() < 8 && len > isize::MAX as usize {
+ return Err(Error::new(
+ ErrorKind::InvalidData,
+ "memory map length overflows isize",
+ ));
+ }
+
+ MmapInner::map_anon(len, self.stack, self.populate, self.huge)
+ .map(|inner| MmapMut { inner })
+ }
+
+ /// Creates a raw memory map.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read and write permissions.
+ pub fn map_raw<T: MmapAsRawDesc>(&self, file: T) -> Result<MmapRaw> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map_mut(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| MmapRaw { inner })
+ }
+
+ /// Creates a read-only raw memory map.
+ ///
+ /// This is primarily useful to avoid intermediate `Mmap` instances when
+ /// read-only access to files modified elsewhere is required.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails.
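+ ///
+ /// # Example
+ ///
+ /// A short sketch; `MmapRaw` only hands out raw pointers, so reads go
+ /// through unsafe pointer accesses:
+ ///
+ /// ```
+ /// use memmap2::MmapOptions;
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let file = File::open("README.md")?;
+ /// let mmap = MmapOptions::new().map_raw_read_only(&file)?;
+ /// // Copy the first byte out through the raw pointer.
+ /// let first = unsafe { mmap.as_ptr().read() };
+ /// assert_eq!(first, b'#');
+ /// # Ok(())
+ /// # }
+ /// ```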
+ pub fn map_raw_read_only<T: MmapAsRawDesc>(&self, file: T) -> Result<MmapRaw> {
+ let desc = file.as_raw_desc();
+
+ MmapInner::map(self.get_len(&file)?, desc.0, self.offset, self.populate)
+ .map(|inner| MmapRaw { inner })
+ }
+}
+
+/// A handle to an immutable memory mapped buffer.
+///
+/// A `Mmap` may be backed by a file, or it can be an anonymous map, backed by volatile memory. Use
+/// [`MmapOptions`] or [`map()`] to create a file-backed memory map. To create an immutable
+/// anonymous memory map, first create a mutable anonymous memory map, and then make it immutable
+/// with [`MmapMut::make_read_only()`].
+///
+/// A file-backed `Mmap` is created from a `&File` reference, and will remain valid even after the
+/// `File` is dropped. In other words, the `Mmap` handle is completely independent of the `File`
+/// used to create it. For consistency, on some platforms this is achieved by duplicating the
+/// underlying file handle. The memory will be unmapped when the `Mmap` handle is dropped.
+///
+/// Dereferencing and accessing the bytes of the buffer may result in page faults (e.g. swapping
+/// the mapped pages into physical memory) though the details of this are platform specific.
+///
+/// `Mmap` is [`Sync`] and [`Send`].
+///
+/// ## Safety
+///
+/// All file-backed memory map constructors are marked `unsafe` because of the potential for
+/// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or
+/// out of process. Applications must consider the risk and take appropriate precautions when using
+/// file-backed maps. Solutions such as file permissions, locks or process-private (e.g. unlinked)
+/// files exist but are platform specific and limited.
+///
+/// ## Example
+///
+/// ```
+/// use memmap2::MmapOptions;
+/// use std::io::Write;
+/// use std::fs::File;
+///
+/// # fn main() -> std::io::Result<()> {
+/// let file = File::open("README.md")?;
+/// let mmap = unsafe { MmapOptions::new().map(&file)? };
+/// assert_eq!(b"# memmap2", &mmap[0..9]);
+/// # Ok(())
+/// # }
+/// ```
+///
+/// See [`MmapMut`] for the mutable version.
+///
+/// [`map()`]: Mmap::map()
+pub struct Mmap {
+ inner: MmapInner,
+}
+
+impl Mmap {
+ /// Creates a read-only memory map backed by a file.
+ ///
+ /// This is equivalent to calling `MmapOptions::new().map(file)`.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::io::Read;
+ ///
+ /// use memmap2::Mmap;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mut file = File::open("LICENSE-APACHE")?;
+ ///
+ /// let mut contents = Vec::new();
+ /// file.read_to_end(&mut contents)?;
+ ///
+ /// let mmap = unsafe { Mmap::map(&file)? };
+ ///
+ /// assert_eq!(&contents[..], &mmap[..]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub unsafe fn map<T: MmapAsRawDesc>(file: T) -> Result<Mmap> {
+ MmapOptions::new().map(file)
+ }
+
+ /// Transition the memory map to be writable.
+ ///
+ /// If the memory map is file-backed, the file must have been opened with write permissions.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with writable permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # extern crate memmap2;
+ /// # extern crate tempfile;
+ /// #
+ /// use memmap2::Mmap;
+ /// use std::ops::DerefMut;
+ /// use std::io::Write;
+ /// # use std::fs::OpenOptions;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// # let tempdir = tempfile::tempdir()?;
+ /// let file = /* file opened with write permissions */
+ /// # OpenOptions::new()
+ /// # .read(true)
+ /// # .write(true)
+ /// # .create(true)
+ /// # .open(tempdir.path()
+ /// # .join("make_mut"))?;
+ /// # file.set_len(128)?;
+ /// let mmap = unsafe { Mmap::map(&file)? };
+ /// // ... use the read-only memory map ...
+ /// let mut mut_mmap = mmap.make_mut()?;
+ /// mut_mmap.deref_mut().write_all(b"hello, world!")?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn make_mut(mut self) -> Result<MmapMut> {
+ self.inner.make_mut()?;
+ Ok(MmapMut { inner: self.inner })
+ }
+
+ /// Advise OS how this memory map will be accessed.
+ ///
+ /// Only supported on Unix.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub fn advise(&self, advice: Advice) -> Result<()> {
+ self.inner
+ .advise(advice as libc::c_int, 0, self.inner.len())
+ }
+
+ /// Advise OS how this memory map will be accessed.
+ ///
+ /// Used with the [unchecked flags][UncheckedAdvice]. Only supported on Unix.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub unsafe fn unchecked_advise(&self, advice: UncheckedAdvice) -> Result<()> {
+ self.inner
+ .advise(advice as libc::c_int, 0, self.inner.len())
+ }
+
+ /// Advise OS how this range of memory map will be accessed.
+ ///
+ /// Only supported on Unix.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
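+ ///
+ /// # Example
+ ///
+ /// A sketch advising only a prefix of the mapping; 4096 is an assumed
+ /// page size, clamped to the map length:
+ ///
+ /// ```
+ /// use memmap2::{Advice, Mmap};
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let file = File::open("LICENSE-APACHE")?;
+ /// let mmap = unsafe { Mmap::map(&file)? };
+ /// let len = mmap.len().min(4096);
+ /// // Ask the kernel to prefetch just the first page.
+ /// mmap.advise_range(Advice::WillNeed, 0, len)?;
+ /// # Ok(())
+ /// # }
+ /// ```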
+ #[cfg(unix)]
+ pub fn advise_range(&self, advice: Advice, offset: usize, len: usize) -> Result<()> {
+ self.inner.advise(advice as libc::c_int, offset, len)
+ }
+
+ /// Advise OS how this range of memory map will be accessed.
+ ///
+ /// Used with the [unchecked flags][UncheckedAdvice]. Only supported on Unix.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub unsafe fn unchecked_advise_range(
+ &self,
+ advice: UncheckedAdvice,
+ offset: usize,
+ len: usize,
+ ) -> Result<()> {
+ self.inner.advise(advice as libc::c_int, offset, len)
+ }
+
+ /// Lock the whole memory map into RAM. Only supported on Unix.
+ ///
+ /// See [mlock()](https://man7.org/linux/man-pages/man2/mlock.2.html) man page.
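+ ///
+ /// # Example
+ ///
+ /// A sketch; locking fails if it would exceed `RLIMIT_MEMLOCK`, so this
+ /// is not run as a test:
+ ///
+ /// ```no_run
+ /// use memmap2::Mmap;
+ /// use std::fs::File;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let file = File::open("LICENSE-APACHE")?;
+ /// let mmap = unsafe { Mmap::map(&file)? };
+ /// mmap.lock()?;   // pin the pages in RAM
+ /// // ... latency-sensitive reads ...
+ /// mmap.unlock()?; // allow paging them out again
+ /// # Ok(())
+ /// # }
+ /// ```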
+ #[cfg(unix)]
+ pub fn lock(&self) -> Result<()> {
+ self.inner.lock()
+ }
+
+ /// Unlock the whole memory map. Only supported on Unix.
+ ///
+ /// See [munlock()](https://man7.org/linux/man-pages/man2/munlock.2.html) man page.
+ #[cfg(unix)]
+ pub fn unlock(&self) -> Result<()> {
+ self.inner.unlock()
+ }
+
+ /// Adjust the size of the memory mapping.
+ ///
+ /// This will try to resize the memory mapping in place. If
+ /// [`RemapOptions::may_move`] is specified it will move the mapping if it
+ /// could not resize in place, otherwise it will error.
+ ///
+ /// Only supported on Linux.
+ ///
+ /// See the [`mremap(2)`] man page.
+ ///
+ /// # Safety
+ ///
+ /// Resizing the memory mapping beyond the end of the mapped file will
+ /// result in UB should you happen to access memory beyond the end of the
+ /// file.
+ ///
+ /// [`mremap(2)`]: https://man7.org/linux/man-pages/man2/mremap.2.html
+ #[cfg(target_os = "linux")]
+ pub unsafe fn remap(&mut self, new_len: usize, options: RemapOptions) -> Result<()> {
+ self.inner.remap(new_len, options)
+ }
+}
+
+#[cfg(feature = "stable_deref_trait")]
+unsafe impl stable_deref_trait::StableDeref for Mmap {}
+
+impl Deref for Mmap {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.inner.ptr(), self.inner.len()) }
+ }
+}
+
+impl AsRef<[u8]> for Mmap {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.deref()
+ }
+}
+
+impl fmt::Debug for Mmap {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Mmap")
+ .field("ptr", &self.as_ptr())
+ .field("len", &self.len())
+ .finish()
+ }
+}
+
+/// A handle to a raw memory mapped buffer.
+///
+/// This struct never hands out references to its interior, only raw pointers.
+/// This can be helpful when creating shared memory maps between untrusted processes.
+pub struct MmapRaw {
+ inner: MmapInner,
+}
+
+impl MmapRaw {
+ /// Creates a writeable memory map backed by a file.
+ ///
+ /// This is equivalent to calling `MmapOptions::new().map_raw(file)`.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read and write permissions.
+ pub fn map_raw<T: MmapAsRawDesc>(file: T) -> Result<MmapRaw> {
+ MmapOptions::new().map_raw(file)
+ }
+
+ /// Returns a raw pointer to the memory mapped file.
+ ///
+ /// Before dereferencing this pointer, you have to make sure that the file has not been
+ /// truncated since the memory map was created.
+ /// Failing to do so will not introduce memory safety issues in Rust terms,
+ /// but it will cause a SIGBUS (or equivalent) signal.
+ #[inline]
+ pub fn as_ptr(&self) -> *const u8 {
+ self.inner.ptr()
+ }
+
+ /// Returns an unsafe mutable pointer to the memory mapped file.
+ ///
+ /// Before dereferencing this pointer, you have to make sure that the file has not been
+ /// truncated since the memory map was created.
+ /// Failing to do so will not introduce memory safety issues in Rust terms,
+ /// but it will cause a SIGBUS (or equivalent) signal.
+ #[inline]
+ pub fn as_mut_ptr(&self) -> *mut u8 {
+ self.inner.ptr() as _
+ }
+
+ /// Returns the length in bytes of the memory map.
+ ///
+ /// Note that truncating the file can cause the length to change (and render this value unusable).
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ /// Flushes outstanding memory map modifications to disk.
+ ///
+ /// When this method returns with a non-error result, all outstanding changes to a file-backed
+ /// memory map are guaranteed to be durably stored. The file's metadata (including last
+ /// modification timestamp) may not be updated.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # extern crate memmap2;
+ /// # extern crate tempfile;
+ /// #
+ /// use std::fs::OpenOptions;
+ /// use std::io::Write;
+ /// use std::path::PathBuf;
+ /// use std::slice;
+ ///
+ /// use memmap2::MmapRaw;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let tempdir = tempfile::tempdir()?;
+ /// let path: PathBuf = /* path to file */
+ /// # tempdir.path().join("flush");
+ /// let file = OpenOptions::new().read(true).write(true).create(true).open(&path)?;
+ /// file.set_len(128)?;
+ ///
+ /// let mmap = MmapRaw::map_raw(&file)?;
+ ///
+ /// let mut memory = unsafe { slice::from_raw_parts_mut(mmap.as_mut_ptr(), 128) };
+ /// memory.write_all(b"Hello, world!")?;
+ /// mmap.flush()?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn flush(&self) -> Result<()> {
+ let len = self.len();
+ self.inner.flush(0, len)
+ }
+
+ /// Asynchronously flushes outstanding memory map modifications to disk.
+ ///
+ /// This method initiates flushing modified pages to durable storage, but it will not wait for
+ /// the operation to complete before returning. The file's metadata (including last
+ /// modification timestamp) may not be updated.
+ pub fn flush_async(&self) -> Result<()> {
+ let len = self.len();
+ self.inner.flush_async(0, len)
+ }
+
+ /// Flushes outstanding memory map modifications in the range to disk.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// When this method returns with a non-error result, all outstanding changes to a file-backed
+ /// memory in the range are guaranteed to be durably stored. The file's metadata (including
+ /// last modification timestamp) may not be updated. It is not guaranteed that only the changes
+ /// in the specified range are flushed; other outstanding changes to the memory map may be
+ /// flushed as well.
+ pub fn flush_range(&self, offset: usize, len: usize) -> Result<()> {
+ self.inner.flush(offset, len)
+ }
+
+ /// Asynchronously flushes outstanding memory map modifications in the range to disk.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// This method initiates flushing modified pages to durable storage, but it will not wait for
+ /// the operation to complete before returning. The file's metadata (including last
+ /// modification timestamp) may not be updated. It is not guaranteed that the only changes
+ /// flushed are those in the specified range; other outstanding changes to the memory map may
+ /// be flushed as well.
+ pub fn flush_async_range(&self, offset: usize, len: usize) -> Result<()> {
+ self.inner.flush_async(offset, len)
+ }
+
+ /// Advise OS how this memory map will be accessed.
+ ///
+ /// Only supported on Unix.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub fn advise(&self, advice: Advice) -> Result<()> {
+ self.inner
+ .advise(advice as libc::c_int, 0, self.inner.len())
+ }
+
+ /// Advise OS how this memory map will be accessed.
+ ///
+ /// Used with the [unchecked flags][UncheckedAdvice]. Only supported on Unix.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub unsafe fn unchecked_advise(&self, advice: UncheckedAdvice) -> Result<()> {
+ self.inner
+ .advise(advice as libc::c_int, 0, self.inner.len())
+ }
+
+ /// Advise OS how this range of memory map will be accessed.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// Only supported on Unix.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub fn advise_range(&self, advice: Advice, offset: usize, len: usize) -> Result<()> {
+ self.inner.advise(advice as libc::c_int, offset, len)
+ }
+
+ /// Advise OS how this range of memory map will be accessed.
+ ///
+ /// Used with the [unchecked flags][UncheckedAdvice]. Only supported on Unix.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+ pub unsafe fn unchecked_advise_range(
+ &self,
+ advice: UncheckedAdvice,
+ offset: usize,
+ len: usize,
+ ) -> Result<()> {
+ self.inner.advise(advice as libc::c_int, offset, len)
+ }
+
+ /// Lock the whole memory map into RAM. Only supported on Unix.
+ ///
+ /// See [mlock()](https://man7.org/linux/man-pages/man2/mlock.2.html) man page.
+ #[cfg(unix)]
+ pub fn lock(&self) -> Result<()> {
+ self.inner.lock()
+ }
+
+ /// Unlock the whole memory map. Only supported on Unix.
+ ///
+ /// See [munlock()](https://man7.org/linux/man-pages/man2/munlock.2.html) man page.
+ #[cfg(unix)]
+ pub fn unlock(&self) -> Result<()> {
+ self.inner.unlock()
+ }
+
+ /// Adjust the size of the memory mapping.
+ ///
+ /// This will try to resize the memory mapping in place. If
+ /// [`RemapOptions::may_move`] is specified it will move the mapping if it
+ /// could not resize in place, otherwise it will error.
+ ///
+ /// Only supported on Linux.
+ ///
+ /// See the [`mremap(2)`] man page.
+ ///
+ /// # Safety
+ ///
+ /// Resizing the memory mapping beyond the end of the mapped file will
+ /// result in UB should you happen to access memory beyond the end of the
+ /// file.
+ ///
+ /// [`mremap(2)`]: https://man7.org/linux/man-pages/man2/mremap.2.html
+ #[cfg(target_os = "linux")]
+ pub unsafe fn remap(&mut self, new_len: usize, options: RemapOptions) -> Result<()> {
+ self.inner.remap(new_len, options)
+ }
+}
+
+impl fmt::Debug for MmapRaw {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("MmapRaw")
+ .field("ptr", &self.as_ptr())
+ .field("len", &self.len())
+ .finish()
+ }
+}
+
+impl From<Mmap> for MmapRaw {
+ fn from(value: Mmap) -> Self {
+ Self { inner: value.inner }
+ }
+}
+
+impl From<MmapMut> for MmapRaw {
+ fn from(value: MmapMut) -> Self {
+ Self { inner: value.inner }
+ }
+}
+
+/// A handle to a mutable memory mapped buffer.
+///
+/// A file-backed `MmapMut` buffer may be used to read from or write to a file. An anonymous
+/// `MmapMut` buffer may be used any place that an in-memory byte buffer is needed. Use
+/// [`MmapMut::map_mut()`] and [`MmapMut::map_anon()`] to create a mutable memory map of the
+/// respective types, or [`MmapOptions::map_mut()`] and [`MmapOptions::map_anon()`] if non-default
+/// options are required.
+///
+/// A file-backed `MmapMut` is created from a `&File` reference, and will remain valid even after the
+/// `File` is dropped. In other words, the `MmapMut` handle is completely independent of the `File`
+/// used to create it. For consistency, on some platforms this is achieved by duplicating the
+/// underlying file handle. The memory will be unmapped when the `MmapMut` handle is dropped.
+///
+/// Dereferencing and accessing the bytes of the buffer may result in page faults (e.g. swapping
+/// the mapped pages into physical memory) though the details of this are platform specific.
+///
+/// `MmapMut` is [`Sync`] and [`Send`].
+///
+/// See [`Mmap`] for the immutable version.
+///
+/// ## Safety
+///
+/// All file-backed memory map constructors are marked `unsafe` because of the potential for
+/// *Undefined Behavior* (UB) using the map if the underlying file is subsequently modified, in or
+/// out of process. Applications must consider the risk and take appropriate precautions when using
+/// file-backed maps. Solutions such as file permissions, locks or process-private (e.g. unlinked)
+/// files exist but are platform specific and limited.
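+///
+/// ## Example
+///
+/// A minimal sketch using an anonymous map as a writable buffer:
+///
+/// ```
+/// use memmap2::MmapMut;
+/// use std::io::Write;
+///
+/// # fn main() -> std::io::Result<()> {
+/// let mut mmap = MmapMut::map_anon(16)?;
+/// (&mut mmap[..]).write_all(b"anonymous bytes!")?;
+/// assert_eq!(&mmap[..], b"anonymous bytes!");
+/// # Ok(())
+/// # }
+/// ```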
+pub struct MmapMut {
+ inner: MmapInner,
+}
+
+impl MmapMut {
+ /// Creates a writeable memory map backed by a file.
+ ///
+ /// This is equivalent to calling `MmapOptions::new().map_mut(file)`.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file is not open with read and write permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # extern crate memmap2;
+ /// # extern crate tempfile;
+ /// #
+ /// use std::fs::OpenOptions;
+ /// use std::path::PathBuf;
+ ///
+ /// use memmap2::MmapMut;
+ /// #
+ /// # fn main() -> std::io::Result<()> {
+ /// # let tempdir = tempfile::tempdir()?;
+ /// let path: PathBuf = /* path to file */
+ /// # tempdir.path().join("map_mut");
+ /// let file = OpenOptions::new()
+ /// .read(true)
+ /// .write(true)
+ /// .create(true)
+ /// .open(&path)?;
+ /// file.set_len(13)?;
+ ///
+ /// let mut mmap = unsafe { MmapMut::map_mut(&file)? };
+ ///
+ /// mmap.copy_from_slice(b"Hello, world!");
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub unsafe fn map_mut<T: MmapAsRawDesc>(file: T) -> Result<MmapMut> {
+ MmapOptions::new().map_mut(file)
+ }
+
+ /// Creates an anonymous memory map.
+ ///
+ /// This is equivalent to calling `MmapOptions::new().len(length).map_anon()`.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails or
+ /// when `len > isize::MAX`.
+ pub fn map_anon(length: usize) -> Result<MmapMut> {
+ MmapOptions::new().len(length).map_anon()
+ }
+
+ /// Flushes outstanding memory map modifications to disk.
+ ///
+ /// When this method returns with a non-error result, all outstanding changes to a file-backed
+ /// memory map are guaranteed to be durably stored. The file's metadata (including last
+ /// modification timestamp) may not be updated.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # extern crate memmap2;
+ /// # extern crate tempfile;
+ /// #
+ /// use std::fs::OpenOptions;
+ /// use std::io::Write;
+ /// use std::path::PathBuf;
+ ///
+ /// use memmap2::MmapMut;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// # let tempdir = tempfile::tempdir()?;
+ /// let path: PathBuf = /* path to file */
+ /// # tempdir.path().join("flush");
+ /// let file = OpenOptions::new().read(true).write(true).create(true).open(&path)?;
+ /// file.set_len(128)?;
+ ///
+ /// let mut mmap = unsafe { MmapMut::map_mut(&file)? };
+ ///
+ /// (&mut mmap[..]).write_all(b"Hello, world!")?;
+ /// mmap.flush()?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn flush(&self) -> Result<()> {
+ let len = self.len();
+ self.inner.flush(0, len)
+ }
+
+ /// Asynchronously flushes outstanding memory map modifications to disk.
+ ///
+ /// This method initiates flushing modified pages to durable storage, but it will not wait for
+ /// the operation to complete before returning. The file's metadata (including last
+ /// modification timestamp) may not be updated.
+ pub fn flush_async(&self) -> Result<()> {
+ let len = self.len();
+ self.inner.flush_async(0, len)
+ }
+
+ /// Flushes outstanding memory map modifications in the range to disk.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// When this method returns with a non-error result, all outstanding changes to a file-backed
+ /// memory in the range are guaranteed to be durably stored. The file's metadata (including
+ /// last modification timestamp) may not be updated. It is not guaranteed that only the changes
+ /// in the specified range are flushed; other outstanding changes to the memory map may be
+ /// flushed as well.
+ pub fn flush_range(&self, offset: usize, len: usize) -> Result<()> {
+ self.inner.flush(offset, len)
+ }
+
+ /// Asynchronously flushes outstanding memory map modifications in the range to disk.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+ /// This method initiates flushing modified pages to durable storage, but it will not wait for
+ /// the operation to complete before returning. The file's metadata (including last
+ /// modification timestamp) may not be updated. It is not guaranteed that the only changes
+ /// flushed are those in the specified range; other outstanding changes to the memory map may
+ /// be flushed as well.
+ pub fn flush_async_range(&self, offset: usize, len: usize) -> Result<()> {
+ self.inner.flush_async(offset, len)
+ }
+
+ /// Returns an immutable version of this memory mapped buffer.
+ ///
+ /// If the memory map is file-backed, the file must have been opened with read permissions.
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file has not been opened with read permissions.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # extern crate memmap2;
+ /// #
+ /// use std::io::Write;
+ /// use std::path::PathBuf;
+ ///
+ /// use memmap2::{Mmap, MmapMut};
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let mut mmap = MmapMut::map_anon(128)?;
+ ///
+ /// (&mut mmap[..]).write(b"Hello, world!")?;
+ ///
+ /// let mmap: Mmap = mmap.make_read_only()?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn make_read_only(mut self) -> Result<Mmap> {
+ self.inner.make_read_only()?;
+ Ok(Mmap { inner: self.inner })
+ }
+
+ /// Transition the memory map to be readable and executable.
+ ///
+ /// If the memory map is file-backed, the file must have been opened with execute permissions.
+ ///
+ /// On systems with separate instructions and data caches (a category that includes many ARM
+ /// chips), a platform-specific call may be needed to ensure that the changes are visible to the
+ /// execution unit (e.g. when using this function to implement a JIT compiler). For more
+ /// details, see [this ARM write-up](https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/caches-and-self-modifying-code)
+ /// or the `man` page for [`sys_icache_invalidate`](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/sys_icache_invalidate.3.html).
+ ///
+ /// # Errors
+ ///
+ /// This method returns an error when the underlying system call fails, which can happen for a
+ /// variety of reasons, such as when the file has not been opened with execute permissions.
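+    ///
+    /// # Example
+    ///
+    /// A minimal sketch: map anonymous memory, (hypothetically) fill it with machine code,
+    /// and transition it to read + execute. Marked `no_run` because generating and running
+    /// code is architecture-specific.
+    ///
+    /// ```no_run
+    /// use memmap2::MmapMut;
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let mut mmap = MmapMut::map_anon(4096)?;
+    /// // ... write machine code into `mmap` ...
+    /// let mmap = mmap.make_exec()?;
+    /// assert_eq!(mmap.len(), 4096);
+    /// # Ok(())
+    /// # }
+    /// ```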
+ pub fn make_exec(mut self) -> Result<Mmap> {
+ self.inner.make_exec()?;
+ Ok(Mmap { inner: self.inner })
+ }
+
+ /// Advise OS how this memory map will be accessed.
+ ///
+ /// Only supported on Unix.
+ ///
+    /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
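+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, hinting that an anonymous map will be read sequentially:
+    ///
+    /// ```
+    /// use memmap2::{Advice, MmapMut};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let mmap = MmapMut::map_anon(8192)?;
+    /// mmap.advise(Advice::Sequential)?;
+    /// # Ok(())
+    /// # }
+    /// ```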
+ #[cfg(unix)]
+ pub fn advise(&self, advice: Advice) -> Result<()> {
+ self.inner
+ .advise(advice as libc::c_int, 0, self.inner.len())
+ }
+
+ /// Advise OS how this memory map will be accessed.
+ ///
+ /// Used with the [unchecked flags][UncheckedAdvice]. Only supported on Unix.
+ ///
+    /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
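+    ///
+    /// # Example
+    ///
+    /// A sketch of discarding resident pages with `UncheckedAdvice::DontNeed` (on Linux, the
+    /// pages of an anonymous map read back as zeros afterwards). Marked `no_run` because the
+    /// advice deliberately changes what the mapping contains.
+    ///
+    /// ```no_run
+    /// use memmap2::{MmapMut, UncheckedAdvice};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let mmap = MmapMut::map_anon(4096)?;
+    /// unsafe { mmap.unchecked_advise(UncheckedAdvice::DontNeed)? };
+    /// # Ok(())
+    /// # }
+    /// ```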
+ #[cfg(unix)]
+ pub unsafe fn unchecked_advise(&self, advice: UncheckedAdvice) -> Result<()> {
+ self.inner
+ .advise(advice as libc::c_int, 0, self.inner.len())
+ }
+
+    /// Advise OS how this range of the memory map will be accessed.
+ ///
+ /// Only supported on Unix.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+    /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
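+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, requesting readahead for only the first 4 KiB of an anonymous map:
+    ///
+    /// ```
+    /// use memmap2::{Advice, MmapMut};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let mmap = MmapMut::map_anon(8192)?;
+    /// mmap.advise_range(Advice::WillNeed, 0, 4096)?;
+    /// # Ok(())
+    /// # }
+    /// ```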
+ #[cfg(unix)]
+ pub fn advise_range(&self, advice: Advice, offset: usize, len: usize) -> Result<()> {
+ self.inner.advise(advice as libc::c_int, offset, len)
+ }
+
+    /// Advise OS how this range of the memory map will be accessed.
+ ///
+ /// Used with the [unchecked flags][UncheckedAdvice]. Only supported on Unix.
+ ///
+ /// The offset and length must be in the bounds of the memory map.
+ ///
+    /// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
+ #[cfg(unix)]
+    pub unsafe fn unchecked_advise_range(
+ &self,
+ advice: UncheckedAdvice,
+ offset: usize,
+ len: usize,
+ ) -> Result<()> {
+ self.inner.advise(advice as libc::c_int, offset, len)
+ }
+
+ /// Lock the whole memory map into RAM. Only supported on Unix.
+ ///
+    /// See [mlock()](https://man7.org/linux/man-pages/man2/mlock.2.html) man page.
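+    ///
+    /// # Example
+    ///
+    /// A minimal sketch pinning a map into RAM and releasing it again. Marked `no_run`
+    /// because `mlock(2)` can fail under a small `RLIMIT_MEMLOCK`.
+    ///
+    /// ```no_run
+    /// use memmap2::MmapMut;
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let mmap = MmapMut::map_anon(4096)?;
+    /// mmap.lock()?;
+    /// // ... the pages now stay resident until unlocked or unmapped ...
+    /// mmap.unlock()?;
+    /// # Ok(())
+    /// # }
+    /// ```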
+ #[cfg(unix)]
+ pub fn lock(&self) -> Result<()> {
+ self.inner.lock()
+ }
+
+ /// Unlock the whole memory map. Only supported on Unix.
+ ///
+    /// See [munlock()](https://man7.org/linux/man-pages/man2/munlock.2.html) man page.
+ #[cfg(unix)]
+ pub fn unlock(&self) -> Result<()> {
+ self.inner.unlock()
+ }
+
+ /// Adjust the size of the memory mapping.
+ ///
+    /// This will try to resize the memory mapping in place. If
+    /// [`RemapOptions::may_move`] is specified, the mapping will be moved if it
+    /// cannot be resized in place; otherwise, the operation fails with an error.
+ ///
+ /// Only supported on Linux.
+ ///
+ /// See the [`mremap(2)`] man page.
+ ///
+ /// # Safety
+ ///
+ /// Resizing the memory mapping beyond the end of the mapped file will
+ /// result in UB should you happen to access memory beyond the end of the
+ /// file.
+ ///
+ /// [`mremap(2)`]: https://man7.org/linux/man-pages/man2/mremap.2.html
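+    ///
+    /// # Example
+    ///
+    /// A minimal sketch growing an anonymous mapping and allowing the kernel to move it.
+    /// Marked `no_run` because `mremap(2)` is Linux-only.
+    ///
+    /// ```no_run
+    /// use memmap2::{MmapMut, RemapOptions};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let mut mmap = MmapMut::map_anon(128)?;
+    /// unsafe { mmap.remap(256, RemapOptions::new().may_move(true))? };
+    /// assert_eq!(mmap.len(), 256);
+    /// # Ok(())
+    /// # }
+    /// ```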
+ #[cfg(target_os = "linux")]
+ pub unsafe fn remap(&mut self, new_len: usize, options: RemapOptions) -> Result<()> {
+ self.inner.remap(new_len, options)
+ }
+}
+
+#[cfg(feature = "stable_deref_trait")]
+unsafe impl stable_deref_trait::StableDeref for MmapMut {}
+
+impl Deref for MmapMut {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.inner.ptr(), self.inner.len()) }
+ }
+}
+
+impl DerefMut for MmapMut {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.inner.mut_ptr(), self.inner.len()) }
+ }
+}
+
+impl AsRef<[u8]> for MmapMut {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.deref()
+ }
+}
+
+impl AsMut<[u8]> for MmapMut {
+ #[inline]
+ fn as_mut(&mut self) -> &mut [u8] {
+ self.deref_mut()
+ }
+}
+
+impl fmt::Debug for MmapMut {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("MmapMut")
+ .field("ptr", &self.as_ptr())
+ .field("len", &self.len())
+ .finish()
+ }
+}
+
+/// Options for [`Mmap::remap`] and [`MmapMut::remap`].
+#[derive(Copy, Clone, Default, Debug)]
+#[cfg(target_os = "linux")]
+pub struct RemapOptions {
+ may_move: bool,
+}
+
+#[cfg(target_os = "linux")]
+impl RemapOptions {
+    /// Creates a new set of options for resizing a memory map.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Controls whether the memory map can be moved if it is not possible to
+ /// resize it in place.
+ ///
+    /// If false, then the memory map is guaranteed to remain at the same
+    /// address when being resized, but attempting to resize will return an
+    /// error if the new memory map would overlap with something else in the
+    /// current process' memory.
+ ///
+ /// By default this is false.
+ ///
+    /// # `may_move` and `StableDeref`
+    /// If the `stable_deref_trait` feature is enabled then [`Mmap`] and
+    /// [`MmapMut`] implement `StableDeref`. `StableDeref` promises that the
+    /// memory map dereferences to a fixed address; however, calling `remap`
+    /// with `may_move` set may result in the backing memory of the mapping
+    /// being moved to a new address. This may cause UB in other code
+    /// depending on the `StableDeref` guarantees.
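+    ///
+    /// # Example
+    ///
+    /// A short sketch of the two configurations:
+    ///
+    /// ```
+    /// use memmap2::RemapOptions;
+    ///
+    /// // Fails rather than moving the mapping if it cannot be resized in place.
+    /// let _fixed = RemapOptions::new();
+    /// // Allows the kernel to relocate the mapping if in-place resizing fails.
+    /// let _movable = RemapOptions::new().may_move(true);
+    /// ```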
+ pub fn may_move(mut self, may_move: bool) -> Self {
+ self.may_move = may_move;
+ self
+ }
+
+ pub(crate) fn into_flags(self) -> libc::c_int {
+ if self.may_move {
+ libc::MREMAP_MAYMOVE
+ } else {
+ 0
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ extern crate tempfile;
+
+ #[cfg(unix)]
+ use crate::advice::Advice;
+ use std::fs::{File, OpenOptions};
+ use std::io::{Read, Write};
+ use std::mem;
+ #[cfg(unix)]
+ use std::os::unix::io::AsRawFd;
+ #[cfg(windows)]
+ use std::os::windows::fs::OpenOptionsExt;
+
+ #[cfg(windows)]
+ const GENERIC_ALL: u32 = 0x10000000;
+
+ use super::{Mmap, MmapMut, MmapOptions};
+
+ #[test]
+ fn map_file() {
+ let expected_len = 128;
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+
+ file.set_len(expected_len as u64).unwrap();
+
+ let mut mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
+ let len = mmap.len();
+ assert_eq!(expected_len, len);
+
+ let zeros = vec![0; len];
+ let incr: Vec<u8> = (0..len as u8).collect();
+
+ // check that the mmap is empty
+ assert_eq!(&zeros[..], &mmap[..]);
+
+ // write values into the mmap
+ (&mut mmap[..]).write_all(&incr[..]).unwrap();
+
+ // read values back
+ assert_eq!(&incr[..], &mmap[..]);
+ }
+
+ #[test]
+ #[cfg(unix)]
+ fn map_fd() {
+ let expected_len = 128;
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+
+ file.set_len(expected_len as u64).unwrap();
+
+ let mut mmap = unsafe { MmapMut::map_mut(file.as_raw_fd()).unwrap() };
+ let len = mmap.len();
+ assert_eq!(expected_len, len);
+
+ let zeros = vec![0; len];
+ let incr: Vec<u8> = (0..len as u8).collect();
+
+ // check that the mmap is empty
+ assert_eq!(&zeros[..], &mmap[..]);
+
+ // write values into the mmap
+ (&mut mmap[..]).write_all(&incr[..]).unwrap();
+
+ // read values back
+ assert_eq!(&incr[..], &mmap[..]);
+ }
+
+ /// Checks that "mapping" a 0-length file derefs to an empty slice.
+ #[test]
+ fn map_empty_file() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+ let mmap = unsafe { Mmap::map(&file).unwrap() };
+ assert!(mmap.is_empty());
+ assert_eq!(mmap.as_ptr().align_offset(mem::size_of::<usize>()), 0);
+ let mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
+ assert!(mmap.is_empty());
+ assert_eq!(mmap.as_ptr().align_offset(mem::size_of::<usize>()), 0);
+ }
+
+ #[test]
+ fn map_anon() {
+ let expected_len = 128;
+ let mut mmap = MmapMut::map_anon(expected_len).unwrap();
+ let len = mmap.len();
+ assert_eq!(expected_len, len);
+
+ let zeros = vec![0; len];
+ let incr: Vec<u8> = (0..len as u8).collect();
+
+ // check that the mmap is empty
+ assert_eq!(&zeros[..], &mmap[..]);
+
+ // write values into the mmap
+ (&mut mmap[..]).write_all(&incr[..]).unwrap();
+
+ // read values back
+ assert_eq!(&incr[..], &mmap[..]);
+ }
+
+ #[test]
+ fn map_anon_zero_len() {
+ assert!(MmapOptions::new().map_anon().unwrap().is_empty())
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn map_anon_len_overflow() {
+ let res = MmapMut::map_anon(0x80000000);
+
+ assert_eq!(
+ res.unwrap_err().to_string(),
+ "memory map length overflows isize"
+ );
+ }
+
+ #[test]
+ fn file_write() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let mut file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+ file.set_len(128).unwrap();
+
+ let write = b"abc123";
+ let mut read = [0u8; 6];
+
+ let mut mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
+ (&mut mmap[..]).write_all(write).unwrap();
+ mmap.flush().unwrap();
+
+ file.read_exact(&mut read).unwrap();
+ assert_eq!(write, &read);
+ }
+
+ #[test]
+ fn flush_range() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+ file.set_len(128).unwrap();
+ let write = b"abc123";
+
+ let mut mmap = unsafe {
+ MmapOptions::new()
+ .offset(2)
+ .len(write.len())
+ .map_mut(&file)
+ .unwrap()
+ };
+ (&mut mmap[..]).write_all(write).unwrap();
+ mmap.flush_async_range(0, write.len()).unwrap();
+ mmap.flush_range(0, write.len()).unwrap();
+ }
+
+ #[test]
+ fn map_copy() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let mut file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+ file.set_len(128).unwrap();
+
+ let nulls = b"\0\0\0\0\0\0";
+ let write = b"abc123";
+ let mut read = [0u8; 6];
+
+ let mut mmap = unsafe { MmapOptions::new().map_copy(&file).unwrap() };
+
+ (&mut mmap[..]).write_all(write).unwrap();
+ mmap.flush().unwrap();
+
+ // The mmap contains the write
+ (&mmap[..]).read_exact(&mut read).unwrap();
+ assert_eq!(write, &read);
+
+ // The file does not contain the write
+ file.read_exact(&mut read).unwrap();
+ assert_eq!(nulls, &read);
+
+ // another mmap does not contain the write
+ let mmap2 = unsafe { MmapOptions::new().map(&file).unwrap() };
+ (&mmap2[..]).read_exact(&mut read).unwrap();
+ assert_eq!(nulls, &read);
+ }
+
+ #[test]
+ fn map_copy_read_only() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+ file.set_len(128).unwrap();
+
+ let nulls = b"\0\0\0\0\0\0";
+ let mut read = [0u8; 6];
+
+ let mmap = unsafe { MmapOptions::new().map_copy_read_only(&file).unwrap() };
+ (&mmap[..]).read_exact(&mut read).unwrap();
+ assert_eq!(nulls, &read);
+
+ let mmap2 = unsafe { MmapOptions::new().map(&file).unwrap() };
+ (&mmap2[..]).read_exact(&mut read).unwrap();
+ assert_eq!(nulls, &read);
+ }
+
+ #[test]
+ fn map_offset() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+
+ let offset = u32::MAX as u64 + 2;
+ let len = 5432;
+ file.set_len(offset + len as u64).unwrap();
+
+ // Check inferred length mmap.
+ let mmap = unsafe { MmapOptions::new().offset(offset).map_mut(&file).unwrap() };
+ assert_eq!(len, mmap.len());
+
+ // Check explicit length mmap.
+ let mut mmap = unsafe {
+ MmapOptions::new()
+ .offset(offset)
+ .len(len)
+ .map_mut(&file)
+ .unwrap()
+ };
+ assert_eq!(len, mmap.len());
+
+ let zeros = vec![0; len];
+ let incr: Vec<_> = (0..len).map(|i| i as u8).collect();
+
+ // check that the mmap is empty
+ assert_eq!(&zeros[..], &mmap[..]);
+
+ // write values into the mmap
+ (&mut mmap[..]).write_all(&incr[..]).unwrap();
+
+ // read values back
+ assert_eq!(&incr[..], &mmap[..]);
+ }
+
+ #[test]
+ fn index() {
+ let mut mmap = MmapMut::map_anon(128).unwrap();
+ mmap[0] = 42;
+ assert_eq!(42, mmap[0]);
+ }
+
+ #[test]
+ fn sync_send() {
+ let mmap = MmapMut::map_anon(129).unwrap();
+
+ fn is_sync_send<T>(_val: T)
+ where
+ T: Sync + Send,
+ {
+ }
+
+ is_sync_send(mmap);
+ }
+
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn jit_x86(mut mmap: MmapMut) {
+ mmap[0] = 0xB8; // mov eax, 0xAB
+ mmap[1] = 0xAB;
+ mmap[2] = 0x00;
+ mmap[3] = 0x00;
+ mmap[4] = 0x00;
+ mmap[5] = 0xC3; // ret
+
+ let mmap = mmap.make_exec().expect("make_exec");
+
+ let jitfn: extern "C" fn() -> u8 = unsafe { mem::transmute(mmap.as_ptr()) };
+ assert_eq!(jitfn(), 0xab);
+ }
+
+ #[test]
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn jit_x86_anon() {
+ jit_x86(MmapMut::map_anon(4096).unwrap());
+ }
+
+ #[test]
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn jit_x86_file() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let mut options = OpenOptions::new();
+ #[cfg(windows)]
+ options.access_mode(GENERIC_ALL);
+
+ let file = options
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(tempdir.path().join("jit_x86"))
+ .expect("open");
+
+ file.set_len(4096).expect("set_len");
+ jit_x86(unsafe { MmapMut::map_mut(&file).expect("map_mut") });
+ }
+
+ #[test]
+ fn mprotect_file() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let mut options = OpenOptions::new();
+ #[cfg(windows)]
+ options.access_mode(GENERIC_ALL);
+
+ let mut file = options
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .expect("open");
+ file.set_len(256_u64).expect("set_len");
+
+ let mmap = unsafe { MmapMut::map_mut(&file).expect("map_mut") };
+
+ let mmap = mmap.make_read_only().expect("make_read_only");
+ let mut mmap = mmap.make_mut().expect("make_mut");
+
+ let write = b"abc123";
+ let mut read = [0u8; 6];
+
+ (&mut mmap[..]).write_all(write).unwrap();
+ mmap.flush().unwrap();
+
+ // The mmap contains the write
+ (&mmap[..]).read_exact(&mut read).unwrap();
+ assert_eq!(write, &read);
+
+ // The file should contain the write
+ file.read_exact(&mut read).unwrap();
+ assert_eq!(write, &read);
+
+ // another mmap should contain the write
+ let mmap2 = unsafe { MmapOptions::new().map(&file).unwrap() };
+ (&mmap2[..]).read_exact(&mut read).unwrap();
+ assert_eq!(write, &read);
+
+ let mmap = mmap.make_exec().expect("make_exec");
+
+ drop(mmap);
+ }
+
+ #[test]
+ fn mprotect_copy() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap");
+
+ let mut options = OpenOptions::new();
+ #[cfg(windows)]
+ options.access_mode(GENERIC_ALL);
+
+ let mut file = options
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .expect("open");
+ file.set_len(256_u64).expect("set_len");
+
+        let mmap = unsafe { MmapOptions::new().map_copy(&file).expect("map_copy") };
+
+ let mmap = mmap.make_read_only().expect("make_read_only");
+ let mut mmap = mmap.make_mut().expect("make_mut");
+
+ let nulls = b"\0\0\0\0\0\0";
+ let write = b"abc123";
+ let mut read = [0u8; 6];
+
+ (&mut mmap[..]).write_all(write).unwrap();
+ mmap.flush().unwrap();
+
+ // The mmap contains the write
+ (&mmap[..]).read_exact(&mut read).unwrap();
+ assert_eq!(write, &read);
+
+ // The file does not contain the write
+ file.read_exact(&mut read).unwrap();
+ assert_eq!(nulls, &read);
+
+ // another mmap does not contain the write
+ let mmap2 = unsafe { MmapOptions::new().map(&file).unwrap() };
+ (&mmap2[..]).read_exact(&mut read).unwrap();
+ assert_eq!(nulls, &read);
+
+ let mmap = mmap.make_exec().expect("make_exec");
+
+ drop(mmap);
+ }
+
+ #[test]
+ fn mprotect_anon() {
+        let mmap = MmapMut::map_anon(256).expect("map_anon");
+
+ let mmap = mmap.make_read_only().expect("make_read_only");
+ let mmap = mmap.make_mut().expect("make_mut");
+ let mmap = mmap.make_exec().expect("make_exec");
+ drop(mmap);
+ }
+
+ #[test]
+ fn raw() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmapraw");
+
+ let mut options = OpenOptions::new();
+ let mut file = options
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .expect("open");
+ file.write_all(b"abc123").unwrap();
+ let mmap = MmapOptions::new().map_raw(&file).unwrap();
+ assert_eq!(mmap.len(), 6);
+ assert!(!mmap.as_ptr().is_null());
+ assert_eq!(unsafe { std::ptr::read(mmap.as_ptr()) }, b'a');
+ }
+
+ #[test]
+ fn raw_read_only() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmaprawro");
+
+ File::create(&path).unwrap().write_all(b"abc123").unwrap();
+
+ let mmap = MmapOptions::new()
+ .map_raw_read_only(&File::open(&path).unwrap())
+ .unwrap();
+
+ assert_eq!(mmap.len(), 6);
+ assert!(!mmap.as_ptr().is_null());
+ assert_eq!(unsafe { std::ptr::read(mmap.as_ptr()) }, b'a');
+ }
+
+ /// Something that relies on StableDeref
+ #[test]
+ #[cfg(feature = "stable_deref_trait")]
+ fn owning_ref() {
+ extern crate owning_ref;
+
+ let mut map = MmapMut::map_anon(128).unwrap();
+ map[10] = 42;
+ let owning = owning_ref::OwningRef::new(map);
+ let sliced = owning.map(|map| &map[10..20]);
+ assert_eq!(42, sliced[0]);
+
+ let map = sliced.into_owner().make_read_only().unwrap();
+ let owning = owning_ref::OwningRef::new(map);
+ let sliced = owning.map(|map| &map[10..20]);
+ assert_eq!(42, sliced[0]);
+ }
+
+ #[test]
+ #[cfg(unix)]
+ fn advise() {
+ let expected_len = 128;
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap_advise");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+
+ file.set_len(expected_len as u64).unwrap();
+
+ // Test MmapMut::advise
+ let mut mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
+ mmap.advise(Advice::Random)
+ .expect("mmap advising should be supported on unix");
+
+ let len = mmap.len();
+ assert_eq!(expected_len, len);
+
+ let zeros = vec![0; len];
+ let incr: Vec<u8> = (0..len as u8).collect();
+
+ // check that the mmap is empty
+ assert_eq!(&zeros[..], &mmap[..]);
+
+ mmap.advise_range(Advice::Sequential, 0, mmap.len())
+ .expect("mmap advising should be supported on unix");
+
+ // write values into the mmap
+ (&mut mmap[..]).write_all(&incr[..]).unwrap();
+
+ // read values back
+ assert_eq!(&incr[..], &mmap[..]);
+
+ // Set advice and Read from the read-only map
+ let mmap = unsafe { Mmap::map(&file).unwrap() };
+
+ mmap.advise(Advice::Random)
+ .expect("mmap advising should be supported on unix");
+
+ // read values back
+ assert_eq!(&incr[..], &mmap[..]);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn advise_writes_unsafely() {
+ let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
+
+ let mut mmap = MmapMut::map_anon(page_size).unwrap();
+ mmap.as_mut().fill(255);
+ let mmap = mmap.make_read_only().unwrap();
+
+ let a = mmap.as_ref()[0];
+ unsafe {
+ mmap.unchecked_advise(crate::UncheckedAdvice::DontNeed)
+ .unwrap();
+ }
+ let b = mmap.as_ref()[0];
+
+ assert_eq!(a, 255);
+ assert_eq!(b, 0);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn advise_writes_unsafely_to_part_of_map() {
+ let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
+
+ let mut mmap = MmapMut::map_anon(2 * page_size).unwrap();
+ mmap.as_mut().fill(255);
+ let mmap = mmap.make_read_only().unwrap();
+
+ let a = mmap.as_ref()[0];
+ let b = mmap.as_ref()[page_size];
+ unsafe {
+ mmap.unchecked_advise_range(crate::UncheckedAdvice::DontNeed, page_size, page_size)
+ .unwrap();
+ }
+ let c = mmap.as_ref()[0];
+ let d = mmap.as_ref()[page_size];
+
+ assert_eq!(a, 255);
+ assert_eq!(b, 255);
+ assert_eq!(c, 255);
+ assert_eq!(d, 0);
+ }
+
+ /// Returns true if a non-zero amount of memory is locked.
+ #[cfg(target_os = "linux")]
+ fn is_locked() -> bool {
+ let status = &std::fs::read_to_string("/proc/self/status")
+ .expect("/proc/self/status should be available");
+ for line in status.lines() {
+ if line.starts_with("VmLck:") {
+ let numbers = line.replace(|c: char| !c.is_ascii_digit(), "");
+ return numbers != "0";
+ }
+ }
+ panic!("cannot get VmLck information")
+ }
+
+ #[test]
+ #[cfg(unix)]
+ fn lock() {
+ let tempdir = tempfile::tempdir().unwrap();
+ let path = tempdir.path().join("mmap_lock");
+
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(path)
+ .unwrap();
+ file.set_len(128).unwrap();
+
+ let mmap = unsafe { Mmap::map(&file).unwrap() };
+ #[cfg(target_os = "linux")]
+ assert!(!is_locked());
+
+ mmap.lock().expect("mmap lock should be supported on unix");
+ #[cfg(target_os = "linux")]
+ assert!(is_locked());
+
+ mmap.lock()
+ .expect("mmap lock again should not cause problems");
+ #[cfg(target_os = "linux")]
+ assert!(is_locked());
+
+ mmap.unlock()
+ .expect("mmap unlock should be supported on unix");
+ #[cfg(target_os = "linux")]
+ assert!(!is_locked());
+
+ mmap.unlock()
+ .expect("mmap unlock again should not cause problems");
+ #[cfg(target_os = "linux")]
+ assert!(!is_locked());
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn remap_grow() {
+ use crate::RemapOptions;
+
+ let initial_len = 128;
+ let final_len = 2000;
+
+ let zeros = vec![0u8; final_len];
+ let incr: Vec<u8> = (0..final_len).map(|v| v as u8).collect();
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(final_len as u64).unwrap();
+
+ let mut mmap = unsafe { MmapOptions::new().len(initial_len).map_mut(&file).unwrap() };
+ assert_eq!(mmap.len(), initial_len);
+ assert_eq!(&mmap[..], &zeros[..initial_len]);
+
+ unsafe {
+ mmap.remap(final_len, RemapOptions::new().may_move(true))
+ .unwrap()
+ };
+
+ // The size should have been updated
+ assert_eq!(mmap.len(), final_len);
+
+ // Should still be all zeros
+ assert_eq!(&mmap[..], &zeros);
+
+ // Write out to the whole expanded slice.
+ mmap.copy_from_slice(&incr);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn remap_shrink() {
+ use crate::RemapOptions;
+
+ let initial_len = 20000;
+ let final_len = 400;
+
+ let incr: Vec<u8> = (0..final_len).map(|v| v as u8).collect();
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(initial_len as u64).unwrap();
+
+ let mut mmap = unsafe { MmapMut::map_mut(&file).unwrap() };
+ assert_eq!(mmap.len(), initial_len);
+
+ unsafe { mmap.remap(final_len, RemapOptions::new()).unwrap() };
+ assert_eq!(mmap.len(), final_len);
+
+ // Check that the mmap is still writable along the slice length
+ mmap.copy_from_slice(&incr);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ #[cfg(target_pointer_width = "32")]
+ fn remap_len_overflow() {
+ use crate::RemapOptions;
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(1024).unwrap();
+ let mut mmap = unsafe { MmapOptions::new().len(1024).map(&file).unwrap() };
+
+ let res = unsafe { mmap.remap(0x80000000, RemapOptions::new().may_move(true)) };
+ assert_eq!(
+ res.unwrap_err().to_string(),
+ "memory map length overflows isize"
+ );
+
+ assert_eq!(mmap.len(), 1024);
+ }
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn remap_with_offset() {
+ use crate::RemapOptions;
+
+ let offset = 77;
+ let initial_len = 128;
+ let final_len = 2000;
+
+ let zeros = vec![0u8; final_len];
+ let incr: Vec<u8> = (0..final_len).map(|v| v as u8).collect();
+
+ let file = tempfile::tempfile().unwrap();
+ file.set_len(final_len as u64 + offset).unwrap();
+
+ let mut mmap = unsafe {
+ MmapOptions::new()
+ .len(initial_len)
+ .offset(offset)
+ .map_mut(&file)
+ .unwrap()
+ };
+ assert_eq!(mmap.len(), initial_len);
+ assert_eq!(&mmap[..], &zeros[..initial_len]);
+
+ unsafe {
+ mmap.remap(final_len, RemapOptions::new().may_move(true))
+ .unwrap()
+ };
+
+ // The size should have been updated
+ assert_eq!(mmap.len(), final_len);
+
+ // Should still be all zeros
+ assert_eq!(&mmap[..], &zeros);
+
+ // Write out to the whole expanded slice.
+ mmap.copy_from_slice(&incr);
+ }
+}
diff --git a/third_party/rust/memmap2/src/stub.rs b/third_party/rust/memmap2/src/stub.rs
new file mode 100644
index 0000000000..881983eb18
--- /dev/null
+++ b/third_party/rust/memmap2/src/stub.rs
@@ -0,0 +1,81 @@
+use std::fs::File;
+use std::io;
+
+// A stable alternative to https://doc.rust-lang.org/stable/std/primitive.never.html
+enum Never {}
+
+pub struct MmapInner {
+ never: Never,
+}
+
+impl MmapInner {
+ fn new() -> io::Result<MmapInner> {
+ Err(io::Error::new(
+ io::ErrorKind::Other,
+ "platform not supported",
+ ))
+ }
+
+ pub fn map(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
+ MmapInner::new()
+ }
+
+ pub fn map_exec(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
+ MmapInner::new()
+ }
+
+ pub fn map_mut(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
+ MmapInner::new()
+ }
+
+ pub fn map_copy(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
+ MmapInner::new()
+ }
+
+ pub fn map_copy_read_only(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
+ MmapInner::new()
+ }
+
+ pub fn map_anon(_: usize, _: bool, _: bool, _: Option<u8>) -> io::Result<MmapInner> {
+ MmapInner::new()
+ }
+
+ pub fn flush(&self, _: usize, _: usize) -> io::Result<()> {
+ match self.never {}
+ }
+
+ pub fn flush_async(&self, _: usize, _: usize) -> io::Result<()> {
+ match self.never {}
+ }
+
+ pub fn make_read_only(&mut self) -> io::Result<()> {
+ match self.never {}
+ }
+
+ pub fn make_exec(&mut self) -> io::Result<()> {
+ match self.never {}
+ }
+
+ pub fn make_mut(&mut self) -> io::Result<()> {
+ match self.never {}
+ }
+
+ #[inline]
+ pub fn ptr(&self) -> *const u8 {
+ match self.never {}
+ }
+
+ #[inline]
+ pub fn mut_ptr(&mut self) -> *mut u8 {
+ match self.never {}
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ match self.never {}
+ }
+}
+
+pub fn file_len(file: &File) -> io::Result<u64> {
+ Ok(file.metadata()?.len())
+}
diff --git a/third_party/rust/memmap2/src/unix.rs b/third_party/rust/memmap2/src/unix.rs
new file mode 100644
index 0000000000..bd8fcf32a8
--- /dev/null
+++ b/third_party/rust/memmap2/src/unix.rs
@@ -0,0 +1,458 @@
+extern crate libc;
+
+use std::fs::File;
+use std::mem::ManuallyDrop;
+use std::os::unix::io::{FromRawFd, RawFd};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::{io, ptr};
+
+#[cfg(any(
+ all(target_os = "linux", not(target_arch = "mips")),
+ target_os = "freebsd",
+ target_os = "android"
+))]
+const MAP_STACK: libc::c_int = libc::MAP_STACK;
+
+#[cfg(not(any(
+ all(target_os = "linux", not(target_arch = "mips")),
+ target_os = "freebsd",
+ target_os = "android"
+)))]
+const MAP_STACK: libc::c_int = 0;
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+const MAP_POPULATE: libc::c_int = libc::MAP_POPULATE;
+
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+const MAP_POPULATE: libc::c_int = 0;
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+const MAP_HUGETLB: libc::c_int = libc::MAP_HUGETLB;
+
+#[cfg(target_os = "linux")]
+const MAP_HUGE_MASK: libc::c_int = libc::MAP_HUGE_MASK;
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+const MAP_HUGE_SHIFT: libc::c_int = libc::MAP_HUGE_SHIFT;
+
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+const MAP_HUGETLB: libc::c_int = 0;
+
+#[cfg(not(target_os = "linux"))]
+const MAP_HUGE_MASK: libc::c_int = 0;
+
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+const MAP_HUGE_SHIFT: libc::c_int = 0;
+
+#[cfg(any(
+ target_os = "android",
+ all(target_os = "linux", not(target_env = "musl"))
+))]
+use libc::{mmap64 as mmap, off64_t as off_t};
+
+#[cfg(not(any(
+ target_os = "android",
+ all(target_os = "linux", not(target_env = "musl"))
+)))]
+use libc::{mmap, off_t};
+
+pub struct MmapInner {
+ ptr: *mut libc::c_void,
+ len: usize,
+}
+
+impl MmapInner {
+ /// Creates a new `MmapInner`.
+ ///
+ /// This is a thin wrapper around the `mmap` system call.
+ fn new(
+ len: usize,
+ prot: libc::c_int,
+ flags: libc::c_int,
+ file: RawFd,
+ offset: u64,
+ ) -> io::Result<MmapInner> {
+ let alignment = offset % page_size() as u64;
+ let aligned_offset = offset - alignment;
+
+ let (map_len, map_offset) = Self::adjust_mmap_params(len, alignment as usize)?;
+
+ unsafe {
+ let ptr = mmap(
+ ptr::null_mut(),
+ map_len as libc::size_t,
+ prot,
+ flags,
+ file,
+ aligned_offset as off_t,
+ );
+
+ if ptr == libc::MAP_FAILED {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(Self::from_raw_parts(ptr, len, map_offset))
+ }
+ }
+ }
+
+ fn adjust_mmap_params(len: usize, alignment: usize) -> io::Result<(usize, usize)> {
+ use std::isize;
+
+ // Rust's slice cannot be larger than isize::MAX.
+ // See https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html
+ //
+ // This is not a problem on 64-bit targets, but on 32-bit one
+ // having a file or an anonymous mapping larger than 2GB is quite normal
+ // and we have to prevent it.
+ //
+ // The code below is essentially the same as in Rust's std:
+ // https://github.com/rust-lang/rust/blob/db78ab70a88a0a5e89031d7ee4eccec835dcdbde/library/alloc/src/raw_vec.rs#L495
+ if std::mem::size_of::<usize>() < 8 && len > isize::MAX as usize {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "memory map length overflows isize",
+ ));
+ }
+
+ let map_len = len + alignment;
+ let map_offset = alignment;
+
+ // `libc::mmap` does not support zero-size mappings. POSIX defines:
+ //
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html
+ // > If `len` is zero, `mmap()` shall fail and no mapping shall be established.
+ //
+        // So if we would create such a mapping, create a one-byte mapping instead:
+ let map_len = map_len.max(1);
+
+        // Note that in that case `MmapInner::len` is still set to zero,
+        // and `Mmap` will still dereference to an empty slice.
+ //
+ // If this mapping is backed by an empty file, we create a mapping larger than the file.
+ // This is unusual but well-defined. On the same man page, POSIX further defines:
+ //
+ // > The `mmap()` function can be used to map a region of memory that is larger
+ // > than the current size of the object.
+ //
+ // (The object here is the file.)
+ //
+ // > Memory access within the mapping but beyond the current end of the underlying
+ // > objects may result in SIGBUS signals being sent to the process. The reason for this
+ // > is that the size of the object can be manipulated by other processes and can change
+ // > at any moment. The implementation should tell the application that a memory reference
+ // > is outside the object where this can be detected; otherwise, written data may be lost
+ // > and read data may not reflect actual data in the object.
+ //
+        // Because `MmapInner::len` is not incremented, this increment of `map_len`
+        // will not allow accesses past the end of the file and will not cause SIGBUS.
+ //
+ // (SIGBUS is still possible by mapping a non-empty file and then truncating it
+ // to a shorter size, but that is unrelated to this handling of empty files.)
+ Ok((map_len, map_offset))
+ }
+
+ /// Get the current memory mapping as a `(ptr, map_len, offset)` tuple.
+ ///
+ /// Note that `map_len` is the length of the memory mapping itself and
+ /// _not_ the one that would be passed to `from_raw_parts`.
+ fn as_mmap_params(&self) -> (*mut libc::c_void, usize, usize) {
+ let offset = self.ptr as usize % page_size();
+ let len = self.len + offset;
+
+ // There are two possible memory layouts we could have, depending on
+ // the length and offset passed when constructing this instance:
+ //
+ // 1. The "normal" memory layout looks like this:
+ //
+ // |<------------------>|<---------------------->|
+ // mmap ptr offset ptr public slice
+ //
+ // That is, we have
+ // - The start of the page-aligned memory mapping returned by mmap,
+ // followed by,
+ // - Some number of bytes that are memory mapped but ignored since
+ // they are before the byte offset requested by the user, followed
+ // by,
+ // - The actual memory mapped slice requested by the user.
+ //
+ // This maps cleanly to a (ptr, len, offset) tuple.
+ //
+ // 2. Then, we have the case where the user requested a zero-length
+ // memory mapping. mmap(2) does not support zero-length mappings so
+ // this crate works around that by actually making a mapping of
+ // length one. This means that we have
+ // - A length zero slice, followed by,
+ // - A single memory mapped byte
+ //
+ // Note that this only happens if the offset within the page is also
+ // zero. Otherwise, we have a memory map of offset bytes and not a
+ // zero-length memory map.
+ //
+ // This doesn't fit cleanly into a (ptr, len, offset) tuple. Instead,
+ // we fudge it slightly: a zero-length memory map turns into a
+ // mapping of length one and can't be told apart outside of this
+ // method without knowing the original length.
+ if len == 0 {
+ (self.ptr, 1, 0)
+ } else {
+ (unsafe { self.ptr.offset(-(offset as isize)) }, len, offset)
+ }
+ }
+
+ /// Construct this `MmapInner` from its raw components
+ ///
+ /// # Safety
+ ///
+    /// - `ptr` must point to the start of a memory mapping that can be freed
+ /// using `munmap(2)` (i.e. returned by `mmap(2)` or `mremap(2)`)
+ /// - The memory mapping at `ptr` must have a length of `len + offset`.
+ /// - If `len + offset == 0` then the memory mapping must be of length 1.
+ /// - `offset` must be less than the current page size.
+ unsafe fn from_raw_parts(ptr: *mut libc::c_void, len: usize, offset: usize) -> Self {
+ debug_assert_eq!(ptr as usize % page_size(), 0, "ptr not page-aligned");
+ debug_assert!(offset < page_size(), "offset larger than page size");
+
+ Self {
+ ptr: ptr.add(offset),
+ len,
+ }
+ }
+
+ pub fn map(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
+ let populate = if populate { MAP_POPULATE } else { 0 };
+ MmapInner::new(
+ len,
+ libc::PROT_READ,
+ libc::MAP_SHARED | populate,
+ file,
+ offset,
+ )
+ }
+
+ pub fn map_exec(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
+ let populate = if populate { MAP_POPULATE } else { 0 };
+ MmapInner::new(
+ len,
+ libc::PROT_READ | libc::PROT_EXEC,
+ libc::MAP_SHARED | populate,
+ file,
+ offset,
+ )
+ }
+
+ pub fn map_mut(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
+ let populate = if populate { MAP_POPULATE } else { 0 };
+ MmapInner::new(
+ len,
+ libc::PROT_READ | libc::PROT_WRITE,
+ libc::MAP_SHARED | populate,
+ file,
+ offset,
+ )
+ }
+
+ pub fn map_copy(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
+ let populate = if populate { MAP_POPULATE } else { 0 };
+ MmapInner::new(
+ len,
+ libc::PROT_READ | libc::PROT_WRITE,
+ libc::MAP_PRIVATE | populate,
+ file,
+ offset,
+ )
+ }
+
+ pub fn map_copy_read_only(
+ len: usize,
+ file: RawFd,
+ offset: u64,
+ populate: bool,
+ ) -> io::Result<MmapInner> {
+ let populate = if populate { MAP_POPULATE } else { 0 };
+ MmapInner::new(
+ len,
+ libc::PROT_READ,
+ libc::MAP_PRIVATE | populate,
+ file,
+ offset,
+ )
+ }
+
+ /// Open an anonymous memory map.
+ pub fn map_anon(
+ len: usize,
+ stack: bool,
+ populate: bool,
+ huge: Option<u8>,
+ ) -> io::Result<MmapInner> {
+ let stack = if stack { MAP_STACK } else { 0 };
+ let populate = if populate { MAP_POPULATE } else { 0 };
+ let hugetlb = if huge.is_some() { MAP_HUGETLB } else { 0 };
+ let offset = huge
+ .map(|mask| ((mask as u64) & (MAP_HUGE_MASK as u64)) << MAP_HUGE_SHIFT)
+ .unwrap_or(0);
+ MmapInner::new(
+ len,
+ libc::PROT_READ | libc::PROT_WRITE,
+ libc::MAP_PRIVATE | libc::MAP_ANON | stack | populate | hugetlb,
+ -1,
+ offset,
+ )
+ }
+
+ pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
+ let alignment = (self.ptr as usize + offset) % page_size();
+ let offset = offset as isize - alignment as isize;
+ let len = len + alignment;
+ let result =
+ unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_SYNC) };
+ if result == 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+
+ pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
+ let alignment = (self.ptr as usize + offset) % page_size();
+ let offset = offset as isize - alignment as isize;
+ let len = len + alignment;
+ let result =
+ unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_ASYNC) };
+ if result == 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+
+ fn mprotect(&mut self, prot: libc::c_int) -> io::Result<()> {
+ unsafe {
+ let alignment = self.ptr as usize % page_size();
+ let ptr = self.ptr.offset(-(alignment as isize));
+ let len = self.len + alignment;
+ let len = len.max(1);
+ if libc::mprotect(ptr, len, prot) == 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+
+ pub fn make_read_only(&mut self) -> io::Result<()> {
+ self.mprotect(libc::PROT_READ)
+ }
+
+ pub fn make_exec(&mut self) -> io::Result<()> {
+ self.mprotect(libc::PROT_READ | libc::PROT_EXEC)
+ }
+
+ pub fn make_mut(&mut self) -> io::Result<()> {
+ self.mprotect(libc::PROT_READ | libc::PROT_WRITE)
+ }
+
+ #[inline]
+ pub fn ptr(&self) -> *const u8 {
+ self.ptr as *const u8
+ }
+
+ #[inline]
+ pub fn mut_ptr(&mut self) -> *mut u8 {
+ self.ptr as *mut u8
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ pub fn advise(&self, advice: libc::c_int, offset: usize, len: usize) -> io::Result<()> {
+ let alignment = (self.ptr as usize + offset) % page_size();
+ let offset = offset as isize - alignment as isize;
+ let len = len + alignment;
+ unsafe {
+ if libc::madvise(self.ptr.offset(offset), len, advice) != 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+ }
+ }
+
+ #[cfg(target_os = "linux")]
+ pub fn remap(&mut self, new_len: usize, options: crate::RemapOptions) -> io::Result<()> {
+ let (old_ptr, old_len, offset) = self.as_mmap_params();
+ let (map_len, offset) = Self::adjust_mmap_params(new_len, offset)?;
+
+ unsafe {
+ let new_ptr = libc::mremap(old_ptr, old_len, map_len, options.into_flags());
+
+ if new_ptr == libc::MAP_FAILED {
+ Err(io::Error::last_os_error())
+ } else {
+ // We explicitly don't drop self since the pointer within is no longer valid.
+ ptr::write(self, Self::from_raw_parts(new_ptr, new_len, offset));
+ Ok(())
+ }
+ }
+ }
+
+ pub fn lock(&self) -> io::Result<()> {
+ unsafe {
+ if libc::mlock(self.ptr, self.len) != 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+ }
+ }
+
+ pub fn unlock(&self) -> io::Result<()> {
+ unsafe {
+ if libc::munlock(self.ptr, self.len) != 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ }
+ }
+ }
+}
+
+impl Drop for MmapInner {
+ fn drop(&mut self) {
+ let (ptr, len, _) = self.as_mmap_params();
+
+ // Any errors during unmapping/closing are ignored as the only way
+ // to report them would be through panicking which is highly discouraged
+ // in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
+ unsafe { libc::munmap(ptr, len as libc::size_t) };
+ }
+}
+
+unsafe impl Sync for MmapInner {}
+unsafe impl Send for MmapInner {}
+
+fn page_size() -> usize {
+ static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
+
+ match PAGE_SIZE.load(Ordering::Relaxed) {
+ 0 => {
+ let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
+
+ PAGE_SIZE.store(page_size, Ordering::Relaxed);
+
+ page_size
+ }
+ page_size => page_size,
+ }
+}
+
+pub fn file_len(file: RawFd) -> io::Result<u64> {
+    // SAFETY: We must not close the passed-in fd by dropping the File we create;
+    // we ensure this by immediately wrapping it in a ManuallyDrop.
+ unsafe {
+ let file = ManuallyDrop::new(File::from_raw_fd(file));
+ Ok(file.metadata()?.len())
+ }
+}
diff --git a/third_party/rust/memmap2/src/windows.rs b/third_party/rust/memmap2/src/windows.rs
new file mode 100644
index 0000000000..8a870955d2
--- /dev/null
+++ b/third_party/rust/memmap2/src/windows.rs
@@ -0,0 +1,524 @@
+#![allow(non_camel_case_types)]
+#![allow(non_snake_case)]
+
+use std::fs::File;
+use std::mem::ManuallyDrop;
+use std::os::raw::c_void;
+use std::os::windows::io::{FromRawHandle, RawHandle};
+use std::{io, mem, ptr};
+
+type BOOL = i32;
+type WORD = u16;
+type DWORD = u32;
+type WCHAR = u16;
+type HANDLE = *mut c_void;
+type LPHANDLE = *mut HANDLE;
+type LPVOID = *mut c_void;
+type LPCVOID = *const c_void;
+type ULONG_PTR = usize;
+type SIZE_T = ULONG_PTR;
+type LPCWSTR = *const WCHAR;
+type PDWORD = *mut DWORD;
+type DWORD_PTR = ULONG_PTR;
+type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
+type LPSYSTEM_INFO = *mut SYSTEM_INFO;
+
+const INVALID_HANDLE_VALUE: HANDLE = -1isize as HANDLE;
+
+const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
+
+const STANDARD_RIGHTS_REQUIRED: DWORD = 0x000F0000;
+
+const SECTION_QUERY: DWORD = 0x0001;
+const SECTION_MAP_WRITE: DWORD = 0x0002;
+const SECTION_MAP_READ: DWORD = 0x0004;
+const SECTION_MAP_EXECUTE: DWORD = 0x0008;
+const SECTION_EXTEND_SIZE: DWORD = 0x0010;
+const SECTION_MAP_EXECUTE_EXPLICIT: DWORD = 0x0020;
+const SECTION_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED
+ | SECTION_QUERY
+ | SECTION_MAP_WRITE
+ | SECTION_MAP_READ
+ | SECTION_MAP_EXECUTE
+ | SECTION_EXTEND_SIZE;
+
+const PAGE_READONLY: DWORD = 0x02;
+const PAGE_READWRITE: DWORD = 0x04;
+const PAGE_WRITECOPY: DWORD = 0x08;
+const PAGE_EXECUTE_READ: DWORD = 0x20;
+const PAGE_EXECUTE_READWRITE: DWORD = 0x40;
+const PAGE_EXECUTE_WRITECOPY: DWORD = 0x80;
+
+const FILE_MAP_WRITE: DWORD = SECTION_MAP_WRITE;
+const FILE_MAP_READ: DWORD = SECTION_MAP_READ;
+const FILE_MAP_ALL_ACCESS: DWORD = SECTION_ALL_ACCESS;
+const FILE_MAP_EXECUTE: DWORD = SECTION_MAP_EXECUTE_EXPLICIT;
+const FILE_MAP_COPY: DWORD = 0x00000001;
+
+#[repr(C)]
+struct SECURITY_ATTRIBUTES {
+ nLength: DWORD,
+ lpSecurityDescriptor: LPVOID,
+ bInheritHandle: BOOL,
+}
+
+#[repr(C)]
+struct SYSTEM_INFO {
+ wProcessorArchitecture: WORD,
+ wReserved: WORD,
+ dwPageSize: DWORD,
+ lpMinimumApplicationAddress: LPVOID,
+ lpMaximumApplicationAddress: LPVOID,
+ dwActiveProcessorMask: DWORD_PTR,
+ dwNumberOfProcessors: DWORD,
+ dwProcessorType: DWORD,
+ dwAllocationGranularity: DWORD,
+ wProcessorLevel: WORD,
+ wProcessorRevision: WORD,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct FILETIME {
+ pub dwLowDateTime: DWORD,
+ pub dwHighDateTime: DWORD,
+}
+
+extern "system" {
+ fn GetCurrentProcess() -> HANDLE;
+
+ fn CloseHandle(hObject: HANDLE) -> BOOL;
+
+ fn DuplicateHandle(
+ hSourceProcessHandle: HANDLE,
+ hSourceHandle: HANDLE,
+ hTargetProcessHandle: HANDLE,
+ lpTargetHandle: LPHANDLE,
+ dwDesiredAccess: DWORD,
+ bInheritHandle: BOOL,
+ dwOptions: DWORD,
+ ) -> BOOL;
+
+ fn CreateFileMappingW(
+ hFile: HANDLE,
+ lpFileMappingAttributes: LPSECURITY_ATTRIBUTES,
+ flProtect: DWORD,
+ dwMaximumSizeHigh: DWORD,
+ dwMaximumSizeLow: DWORD,
+ lpName: LPCWSTR,
+ ) -> HANDLE;
+
+ fn FlushFileBuffers(hFile: HANDLE) -> BOOL;
+
+ fn FlushViewOfFile(lpBaseAddress: LPCVOID, dwNumberOfBytesToFlush: SIZE_T) -> BOOL;
+
+ fn UnmapViewOfFile(lpBaseAddress: LPCVOID) -> BOOL;
+
+ fn MapViewOfFile(
+ hFileMappingObject: HANDLE,
+ dwDesiredAccess: DWORD,
+ dwFileOffsetHigh: DWORD,
+ dwFileOffsetLow: DWORD,
+ dwNumberOfBytesToMap: SIZE_T,
+ ) -> LPVOID;
+
+ fn VirtualProtect(
+ lpAddress: LPVOID,
+ dwSize: SIZE_T,
+ flNewProtect: DWORD,
+ lpflOldProtect: PDWORD,
+ ) -> BOOL;
+
+ fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
+}
+
+/// Returns a fixed aligned pointer that is valid for `slice::from_raw_parts::<u8>` with `len == 0`.
+///
+/// This aligns the pointer to `allocation_granularity()` or 1 if unknown.
+fn empty_slice_ptr() -> *mut c_void {
+ let align = allocation_granularity().max(1);
+ unsafe { mem::transmute(align) }
+}
+
+pub struct MmapInner {
+ handle: Option<RawHandle>,
+ ptr: *mut c_void,
+ len: usize,
+ copy: bool,
+}
+
+impl MmapInner {
+ /// Creates a new `MmapInner`.
+ ///
+ /// This is a thin wrapper around the `CreateFileMappingW` and `MapViewOfFile` system calls.
+ pub fn new(
+ handle: RawHandle,
+ protect: DWORD,
+ access: DWORD,
+ offset: u64,
+ len: usize,
+ copy: bool,
+ ) -> io::Result<MmapInner> {
+ let alignment = offset % allocation_granularity() as u64;
+ let aligned_offset = offset - alignment as u64;
+ let aligned_len = len + alignment as usize;
+ if aligned_len == 0 {
+ // `CreateFileMappingW` documents:
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-createfilemappingw
+ // > An attempt to map a file with a length of 0 (zero) fails with an error code
+ // > of ERROR_FILE_INVALID. Applications should test for files with a length of 0
+ // > (zero) and reject those files.
+ //
+ // For such files, don’t create a mapping at all and use a marker pointer instead.
+ return Ok(MmapInner {
+ handle: None,
+ ptr: empty_slice_ptr(),
+ len: 0,
+ copy,
+ });
+ }
+
+ unsafe {
+ let mapping = CreateFileMappingW(handle, ptr::null_mut(), protect, 0, 0, ptr::null());
+ if mapping.is_null() {
+ return Err(io::Error::last_os_error());
+ }
+
+ let ptr = MapViewOfFile(
+ mapping,
+ access,
+ (aligned_offset >> 16 >> 16) as DWORD,
+ (aligned_offset & 0xffffffff) as DWORD,
+ aligned_len as SIZE_T,
+ );
+ CloseHandle(mapping);
+ if ptr.is_null() {
+ return Err(io::Error::last_os_error());
+ }
+
+ let mut new_handle = 0 as RawHandle;
+ let cur_proc = GetCurrentProcess();
+ let ok = DuplicateHandle(
+ cur_proc,
+ handle,
+ cur_proc,
+ &mut new_handle,
+ 0,
+ 0,
+ DUPLICATE_SAME_ACCESS,
+ );
+ if ok == 0 {
+ UnmapViewOfFile(ptr);
+ return Err(io::Error::last_os_error());
+ }
+
+ Ok(MmapInner {
+ handle: Some(new_handle),
+ ptr: ptr.offset(alignment as isize),
+ len: len as usize,
+ copy,
+ })
+ }
+ }
+
+ pub fn map(
+ len: usize,
+ handle: RawHandle,
+ offset: u64,
+ _populate: bool,
+ ) -> io::Result<MmapInner> {
+ let write = protection_supported(handle, PAGE_READWRITE);
+ let exec = protection_supported(handle, PAGE_EXECUTE_READ);
+ let mut access = FILE_MAP_READ;
+ let protection = match (write, exec) {
+ (true, true) => {
+ access |= FILE_MAP_WRITE | FILE_MAP_EXECUTE;
+ PAGE_EXECUTE_READWRITE
+ }
+ (true, false) => {
+ access |= FILE_MAP_WRITE;
+ PAGE_READWRITE
+ }
+ (false, true) => {
+ access |= FILE_MAP_EXECUTE;
+ PAGE_EXECUTE_READ
+ }
+ (false, false) => PAGE_READONLY,
+ };
+
+ let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
+ if write || exec {
+ inner.make_read_only()?;
+ }
+ Ok(inner)
+ }
+
+ pub fn map_exec(
+ len: usize,
+ handle: RawHandle,
+ offset: u64,
+ _populate: bool,
+ ) -> io::Result<MmapInner> {
+ let write = protection_supported(handle, PAGE_READWRITE);
+ let mut access = FILE_MAP_READ | FILE_MAP_EXECUTE;
+ let protection = if write {
+ access |= FILE_MAP_WRITE;
+ PAGE_EXECUTE_READWRITE
+ } else {
+ PAGE_EXECUTE_READ
+ };
+
+ let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
+ if write {
+ inner.make_exec()?;
+ }
+ Ok(inner)
+ }
+
+ pub fn map_mut(
+ len: usize,
+ handle: RawHandle,
+ offset: u64,
+ _populate: bool,
+ ) -> io::Result<MmapInner> {
+ let exec = protection_supported(handle, PAGE_EXECUTE_READ);
+ let mut access = FILE_MAP_READ | FILE_MAP_WRITE;
+ let protection = if exec {
+ access |= FILE_MAP_EXECUTE;
+ PAGE_EXECUTE_READWRITE
+ } else {
+ PAGE_READWRITE
+ };
+
+ let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
+ if exec {
+ inner.make_mut()?;
+ }
+ Ok(inner)
+ }
+
+ pub fn map_copy(
+ len: usize,
+ handle: RawHandle,
+ offset: u64,
+ _populate: bool,
+ ) -> io::Result<MmapInner> {
+ let exec = protection_supported(handle, PAGE_EXECUTE_READWRITE);
+ let mut access = FILE_MAP_COPY;
+ let protection = if exec {
+ access |= FILE_MAP_EXECUTE;
+ PAGE_EXECUTE_WRITECOPY
+ } else {
+ PAGE_WRITECOPY
+ };
+
+ let mut inner = MmapInner::new(handle, protection, access, offset, len, true)?;
+ if exec {
+ inner.make_mut()?;
+ }
+ Ok(inner)
+ }
+
+ pub fn map_copy_read_only(
+ len: usize,
+ handle: RawHandle,
+ offset: u64,
+ _populate: bool,
+ ) -> io::Result<MmapInner> {
+ let write = protection_supported(handle, PAGE_READWRITE);
+ let exec = protection_supported(handle, PAGE_EXECUTE_READ);
+ let mut access = FILE_MAP_COPY;
+ let protection = if exec {
+ access |= FILE_MAP_EXECUTE;
+ PAGE_EXECUTE_WRITECOPY
+ } else {
+ PAGE_WRITECOPY
+ };
+
+ let mut inner = MmapInner::new(handle, protection, access, offset, len, true)?;
+ if write || exec {
+ inner.make_read_only()?;
+ }
+ Ok(inner)
+ }
+
+ pub fn map_anon(
+ len: usize,
+ _stack: bool,
+ _populate: bool,
+ _huge: Option<u8>,
+ ) -> io::Result<MmapInner> {
+ // Ensure a non-zero length for the underlying mapping
+ let mapped_len = len.max(1);
+ unsafe {
+ // Create a mapping and view with maximum access permissions, then use `VirtualProtect`
+ // to set the actual `Protection`. This way, we can set more permissive protection later
+ // on.
+ // Also see https://msdn.microsoft.com/en-us/library/windows/desktop/aa366537.aspx
+
+ let mapping = CreateFileMappingW(
+ INVALID_HANDLE_VALUE,
+ ptr::null_mut(),
+ PAGE_EXECUTE_READWRITE,
+ (mapped_len >> 16 >> 16) as DWORD,
+ (mapped_len & 0xffffffff) as DWORD,
+ ptr::null(),
+ );
+ if mapping.is_null() {
+ return Err(io::Error::last_os_error());
+ }
+ let access = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;
+ let ptr = MapViewOfFile(mapping, access, 0, 0, mapped_len as SIZE_T);
+ CloseHandle(mapping);
+
+ if ptr.is_null() {
+ return Err(io::Error::last_os_error());
+ }
+
+ let mut old = 0;
+ let result = VirtualProtect(ptr, mapped_len as SIZE_T, PAGE_READWRITE, &mut old);
+ if result != 0 {
+ Ok(MmapInner {
+ handle: None,
+ ptr,
+ len: len as usize,
+ copy: false,
+ })
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+
+ pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
+ self.flush_async(offset, len)?;
+
+ if let Some(handle) = self.handle {
+ let ok = unsafe { FlushFileBuffers(handle) };
+ if ok == 0 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+
+ Ok(())
+ }
+
+ pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
+ if self.ptr == empty_slice_ptr() {
+ return Ok(());
+ }
+ let result = unsafe { FlushViewOfFile(self.ptr.add(offset), len as SIZE_T) };
+ if result != 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+
+ fn virtual_protect(&mut self, protect: DWORD) -> io::Result<()> {
+ if self.ptr == empty_slice_ptr() {
+ return Ok(());
+ }
+ unsafe {
+ let alignment = self.ptr as usize % allocation_granularity();
+ let ptr = self.ptr.offset(-(alignment as isize));
+ let aligned_len = self.len as SIZE_T + alignment as SIZE_T;
+
+ let mut old = 0;
+ let result = VirtualProtect(ptr, aligned_len, protect, &mut old);
+
+ if result != 0 {
+ Ok(())
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+
+ pub fn make_read_only(&mut self) -> io::Result<()> {
+ self.virtual_protect(PAGE_READONLY)
+ }
+
+ pub fn make_exec(&mut self) -> io::Result<()> {
+ if self.copy {
+ self.virtual_protect(PAGE_EXECUTE_WRITECOPY)
+ } else {
+ self.virtual_protect(PAGE_EXECUTE_READ)
+ }
+ }
+
+ pub fn make_mut(&mut self) -> io::Result<()> {
+ if self.copy {
+ self.virtual_protect(PAGE_WRITECOPY)
+ } else {
+ self.virtual_protect(PAGE_READWRITE)
+ }
+ }
+
+ #[inline]
+ pub fn ptr(&self) -> *const u8 {
+ self.ptr as *const u8
+ }
+
+ #[inline]
+ pub fn mut_ptr(&mut self) -> *mut u8 {
+ self.ptr as *mut u8
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+}
+
+impl Drop for MmapInner {
+ fn drop(&mut self) {
+ if self.ptr == empty_slice_ptr() {
+ return;
+ }
+ let alignment = self.ptr as usize % allocation_granularity();
+ // Any errors during unmapping/closing are ignored as the only way
+ // to report them would be through panicking which is highly discouraged
+ // in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
+ unsafe {
+ let ptr = self.ptr.offset(-(alignment as isize));
+ UnmapViewOfFile(ptr);
+
+ if let Some(handle) = self.handle {
+ CloseHandle(handle);
+ }
+ }
+ }
+}
+
+unsafe impl Sync for MmapInner {}
+unsafe impl Send for MmapInner {}
+
+fn protection_supported(handle: RawHandle, protection: DWORD) -> bool {
+ unsafe {
+ let mapping = CreateFileMappingW(handle, ptr::null_mut(), protection, 0, 0, ptr::null());
+ if mapping.is_null() {
+ return false;
+ }
+ CloseHandle(mapping);
+ true
+ }
+}
+
+fn allocation_granularity() -> usize {
+ unsafe {
+ let mut info = mem::zeroed();
+ GetSystemInfo(&mut info);
+ info.dwAllocationGranularity as usize
+ }
+}
+
+pub fn file_len(handle: RawHandle) -> io::Result<u64> {
+    // SAFETY: We must not close the passed-in handle by dropping the File we create;
+    // we ensure this by immediately wrapping it in a ManuallyDrop.
+ unsafe {
+ let file = ManuallyDrop::new(File::from_raw_handle(handle));
+ Ok(file.metadata()?.len())
+ }
+}