From 9835e2ae736235810b4ea1c162ca5e65c547e770 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 04:49:50 +0200
Subject: Merging upstream version 1.71.1+dfsg1.

Signed-off-by: Daniel Baumann
---
 vendor/redox_syscall/src/io/dma.rs | 61 +++++++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 21 deletions(-)

diff --git a/vendor/redox_syscall/src/io/dma.rs b/vendor/redox_syscall/src/io/dma.rs
index b356c8abe..0613fc9fc 100644
--- a/vendor/redox_syscall/src/io/dma.rs
+++ b/vendor/redox_syscall/src/io/dma.rs
@@ -3,7 +3,8 @@ use core::ops::{Deref, DerefMut};
 use core::{ptr, slice};
 
 use crate::Result;
-use crate::{PartialAllocStrategy, PhysallocFlags};
+use crate::{PartialAllocStrategy, PhysallocFlags, PhysmapFlags};
+use crate::arch::PAGE_SIZE;
 
 /// An RAII guard of a physical memory allocation. Currently all physically allocated memory are
 /// page-aligned and take up at least 4k of space (on x86_64).
@@ -13,12 +14,35 @@ pub struct PhysBox {
     size: usize
 }
 
+const fn round_up(x: usize) -> usize {
+    (x + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE
+}
+fn assert_aligned(x: usize) {
+    assert_eq!(x % PAGE_SIZE, 0);
+}
+
+#[cfg(target_arch = "aarch64")]
+fn physmap_flags() -> PhysmapFlags {
+    // aarch64 currently must map DMA memory without caching to ensure coherence
+    crate::PHYSMAP_NO_CACHE | crate::PHYSMAP_WRITE
+}
+
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn physmap_flags() -> PhysmapFlags {
+    // x86 ensures cache coherence with DMA memory
+    crate::PHYSMAP_WRITE
+}
+
 impl PhysBox {
-    /// Construct a PhysBox from an address and a size.
+    /// Construct a PhysBox from an address and a size. The address must be page-aligned, and the
+    /// size must similarly be a multiple of the page size.
     ///
     /// # Safety
     /// This function is unsafe because when dropping, Self has to be a valid allocation.
     pub unsafe fn from_raw_parts(address: usize, size: usize) -> Self {
+        assert_aligned(address);
+        assert_aligned(size);
+
         Self {
             address,
             size,
@@ -42,12 +66,10 @@ impl PhysBox {
     pub fn new_with_flags(size: usize, flags: PhysallocFlags) -> Result<Self> {
         assert!(!flags.contains(PhysallocFlags::PARTIAL_ALLOC));
+        assert_aligned(size);
 
         let address = unsafe { crate::physalloc2(size, flags.bits())? };
 
-        Ok(Self {
-            address,
-            size,
-        })
+        Ok(unsafe { Self::from_raw_parts(address, size) })
     }
 
     /// "Partially" allocate physical memory, in the sense that the allocation may be smaller than
@@ -57,21 +79,18 @@ impl PhysBox {
     /// that first allocation only returns half the size, the driver can do another allocation
     /// and then let the device use both buffers.
     pub fn new_partial_allocation(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, mut min: usize) -> Result<Self> {
+        assert_aligned(size);
         debug_assert!(!(flags.contains(PhysallocFlags::PARTIAL_ALLOC) && strategy.is_none()));
 
-        let address = unsafe { crate::physalloc3(size, flags.bits() | strategy.map(|s| s as usize).unwrap_or(0), &mut min)? };
-        Ok(Self {
-            address,
-            size: min,
-        })
+        let address = unsafe { crate::physalloc3(size, flags.bits() | strategy.map_or(0, |s| s as usize), &mut min)? };
+        Ok(unsafe { Self::from_raw_parts(address, size) })
     }
 
     pub fn new(size: usize) -> Result<Self> {
+        assert_aligned(size);
+
         let address = unsafe { crate::physalloc(size)? };
-        Ok(Self {
-            address,
-            size,
-        })
+        Ok(unsafe { Self::from_raw_parts(address, size) })
     }
 }
 
@@ -88,7 +107,7 @@ pub struct Dma<T: ?Sized> {
 
 impl<T> Dma<T> {
     pub fn from_physbox_uninit(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
-        let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut MaybeUninit<T>;
+        let virt = unsafe { crate::physmap(phys.address, phys.size, physmap_flags())? } as *mut MaybeUninit<T>;
 
         Ok(Dma {
             phys,
@@ -111,11 +130,11 @@ impl<T> Dma<T> {
     }
 
     pub fn new(value: T) -> Result<Self> {
-        let phys = PhysBox::new(mem::size_of::<T>())?;
+        let phys = PhysBox::new(round_up(mem::size_of::<T>()))?;
         Self::from_physbox(phys, value)
     }
     pub fn zeroed() -> Result<Dma<MaybeUninit<T>>> {
-        let phys = PhysBox::new(mem::size_of::<T>())?;
+        let phys = PhysBox::new(round_up(mem::size_of::<T>()))?;
         Self::from_physbox_zeroed(phys)
     }
 }
@@ -149,7 +168,7 @@ impl<T> Dma<[T]> {
         assert!(len <= max_len);
 
         Ok(Dma {
-            virt: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? as *mut MaybeUninit<T>, len) } as *mut [MaybeUninit<T>],
+            virt: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, physmap_flags())? as *mut MaybeUninit<T>, len) } as *mut [MaybeUninit<T>],
             phys,
         })
     }
@@ -163,7 +182,7 @@ impl<T> Dma<[T]> {
     /// * `T` must be properly aligned.
     /// * `T` must be valid as zeroed (i.e. no NonNull pointers).
    pub unsafe fn zeroed_unsized(count: usize) -> Result<Self> {
-        let phys = PhysBox::new(mem::size_of::<T>() * count)?;
+        let phys = PhysBox::new(round_up(mem::size_of::<T>() * count))?;
         Ok(Self::from_physbox_zeroed_unsized(phys, count)?.assume_init())
     }
 }
@@ -195,6 +214,6 @@ impl<T: ?Sized> DerefMut for Dma<T> {
 impl<T: ?Sized> Drop for Dma<T> {
     fn drop(&mut self) {
         unsafe { ptr::drop_in_place(self.virt) }
-        let _ = unsafe { crate::physunmap(self.virt as *mut u8 as usize) };
+        let _ = unsafe { crate::funmap(self.virt as *mut u8 as usize, self.phys.size) };
     }
 }
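
[Editor's note: the round_up helper this patch introduces is the standard align-up idiom, and it is what lets Dma::new and Dma::zeroed pass arbitrary mem::size_of::<T>() values while still satisfying the new page-alignment assertions in PhysBox::from_raw_parts. A minimal standalone sketch of the arithmetic, assuming a 4 KiB page size as on x86_64 (in the crate, PAGE_SIZE comes from crate::arch::PAGE_SIZE):

    const PAGE_SIZE: usize = 4096; // assumed here; the crate uses crate::arch::PAGE_SIZE

    const fn round_up(x: usize) -> usize {
        // Add PAGE_SIZE - 1, then truncate with integer division:
        // any size that is not a page multiple is bumped to the next page boundary.
        (x + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE
    }

    fn main() {
        assert_eq!(round_up(0), 0);        // zero stays zero
        assert_eq!(round_up(1), 4096);     // sub-page sizes occupy a full page
        assert_eq!(round_up(4096), 4096);  // aligned sizes are unchanged
        assert_eq!(round_up(4097), 8192);  // one byte over spills into the next page
    }

This relies only on PAGE_SIZE being a power of two (or more generally nonzero), matching the patch's assumption that all physical allocations are whole pages.]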