From def92d1b8e9d373e2f6f27c366d578d97d8960c6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 15 May 2024 05:34:50 +0200
Subject: Merging upstream version 126.0.

Signed-off-by: Daniel Baumann
---
 mozglue/baseprofiler/core/platform.cpp |  16 ++---
 mozglue/static/rust/build.rs           |   4 +-
 mozglue/static/rust/lib.rs             | 123 ++++++++++++++++++---------
 3 files changed, 75 insertions(+), 68 deletions(-)

(limited to 'mozglue')

diff --git a/mozglue/baseprofiler/core/platform.cpp b/mozglue/baseprofiler/core/platform.cpp
index 4f69aadd4a..6080ba88c6 100644
--- a/mozglue/baseprofiler/core/platform.cpp
+++ b/mozglue/baseprofiler/core/platform.cpp
@@ -1283,9 +1283,9 @@ struct NativeStack {
 
 // Merges the profiling stack and native stack, outputting the details to
 // aCollector.
-static void MergeStacks(uint32_t aFeatures, bool aIsSynchronous,
+static void MergeStacks(bool aIsSynchronous,
                         const RegisteredThread& aRegisteredThread,
-                        const Registers& aRegs, const NativeStack& aNativeStack,
+                        const NativeStack& aNativeStack,
                         ProfilerStackCollector& aCollector) {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
@@ -1695,13 +1695,11 @@ static inline void DoSharedSample(
       aCaptureOptions == StackCaptureOptions::Full) {
     DoNativeBacktrace(aLock, aRegisteredThread, aRegs, nativeStack);
 
-    MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aRegisteredThread,
-                aRegs, nativeStack, collector);
+    MergeStacks(aIsSynchronous, aRegisteredThread, nativeStack, collector);
   } else
 #endif
   {
-    MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aRegisteredThread,
-                aRegs, nativeStack, collector);
+    MergeStacks(aIsSynchronous, aRegisteredThread, nativeStack, collector);
 
     // We can't walk the whole native stack, but we can record the top frame.
     if (aCaptureOptions == StackCaptureOptions::Full) {
@@ -3786,13 +3784,11 @@ void profiler_suspend_and_sample_thread(BaseProfilerThreadId aThreadId,
 #  error "Invalid configuration"
 #  endif
 
-      MergeStacks(aFeatures, isSynchronous, registeredThread, aRegs,
-                  nativeStack, aCollector);
+      MergeStacks(isSynchronous, registeredThread, nativeStack, aCollector);
     } else
 #endif
     {
-      MergeStacks(aFeatures, isSynchronous, registeredThread, aRegs,
-                  nativeStack, aCollector);
+      MergeStacks(isSynchronous, registeredThread, nativeStack, aCollector);
 
       aCollector.CollectNativeLeafAddr((void*)aRegs.mPC);
     }
diff --git a/mozglue/static/rust/build.rs b/mozglue/static/rust/build.rs
index e44419fb16..1daa584070 100644
--- a/mozglue/static/rust/build.rs
+++ b/mozglue/static/rust/build.rs
@@ -16,10 +16,10 @@ fn main() {
     println!("cargo:rerun-if-changed=wrappers.cpp");
 
     let ver = version().unwrap();
-    let max_oom_hook_version = Version::parse("1.77.0-alpha").unwrap();
+    let max_oom_hook_version = Version::parse("1.78.0-alpha").unwrap();
     // The new alloc error panic feature was temporarily reverted. We kept the
     // code in tree, but the version here is such that it's effectively never used.
-    let max_alloc_error_panic_version = Version::parse("1.77.0-alpha").unwrap();
+    let max_alloc_error_panic_version = Version::parse("1.78.0-alpha").unwrap();
 
     if ver < max_oom_hook_version {
         println!("cargo:rustc-cfg=feature=\"oom_with_hook\"");
diff --git a/mozglue/static/rust/lib.rs b/mozglue/static/rust/lib.rs
index 001b920766..be93d7fc82 100644
--- a/mozglue/static/rust/lib.rs
+++ b/mozglue/static/rust/lib.rs
@@ -6,10 +6,12 @@
 #![cfg_attr(feature = "oom_with_alloc_error_panic", feature(panic_oom_payload))]
 
 use arrayvec::ArrayString;
+use std::alloc::{GlobalAlloc, Layout};
 use std::cmp;
 use std::ops::Deref;
 use std::os::raw::c_char;
 use std::os::raw::c_int;
+use std::os::raw::c_void;
 use std::panic;
 
 #[link(name = "wrappers")]
@@ -139,83 +141,92 @@ mod oom_hook {
     }
 }
 
-#[cfg(feature = "moz_memory")]
-mod moz_memory {
-    use std::alloc::{GlobalAlloc, Layout};
-    use std::os::raw::c_void;
+extern "C" {
+    fn malloc(size: usize) -> *mut c_void;
 
-    extern "C" {
-        fn malloc(size: usize) -> *mut c_void;
+    fn free(ptr: *mut c_void);
 
-        fn free(ptr: *mut c_void);
+    fn calloc(nmemb: usize, size: usize) -> *mut c_void;
 
-        fn calloc(nmemb: usize, size: usize) -> *mut c_void;
+    fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void;
 
-        fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void;
+    #[cfg(windows)]
+    fn _aligned_malloc(size: usize, align: usize) -> *mut c_void;
 
-        #[cfg(windows)]
-        fn _aligned_malloc(size: usize, align: usize) -> *mut c_void;
+    #[cfg(windows)]
+    fn _aligned_free(ptr: *mut c_void);
 
-        #[cfg(not(windows))]
-        fn memalign(align: usize, size: usize) -> *mut c_void;
-    }
+    #[cfg(not(windows))]
+    fn memalign(align: usize, size: usize) -> *mut c_void;
+}
 
-    #[cfg(windows)]
-    unsafe fn memalign(align: usize, size: usize) -> *mut c_void {
-        _aligned_malloc(size, align)
-    }
+#[cfg(windows)]
+unsafe fn memalign(align: usize, size: usize) -> *mut c_void {
+    _aligned_malloc(size, align)
+}
 
-    pub struct GeckoAlloc;
+pub struct GeckoAlloc;
 
-    #[inline(always)]
-    fn need_memalign(layout: Layout) -> bool {
-        // mozjemalloc guarantees a minimum alignment of 16 for all sizes, except
-        // for size classes below 16 (4 and 8).
-        layout.align() > layout.size() || layout.align() > 16
-    }
+#[inline(always)]
+fn need_memalign(layout: Layout) -> bool {
+    // mozjemalloc guarantees a minimum alignment of 16 for all sizes, except
+    // for size classes below 16 (4 and 8).
+    layout.align() > layout.size() || layout.align() > 16
+}
 
-    unsafe impl GlobalAlloc for GeckoAlloc {
-        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-            if need_memalign(layout) {
-                memalign(layout.align(), layout.size()) as *mut u8
-            } else {
-                malloc(layout.size()) as *mut u8
-            }
+unsafe impl GlobalAlloc for GeckoAlloc {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        if need_memalign(layout) {
+            memalign(layout.align(), layout.size()) as *mut u8
+        } else {
+            malloc(layout.size()) as *mut u8
         }
+    }
 
-        unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+    // On Windows, _aligned_free must be used to free memory allocated with
+    // _aligned_malloc. Except when mozjemalloc is enabled, in which case
+    // _aligned_malloc-allocated memory can be freed with free.
+    #[cfg(all(windows, not(feature = "moz_memory")))]
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        if need_memalign(layout) {
+            _aligned_free(ptr as *mut c_void)
+        } else {
             free(ptr as *mut c_void)
         }
+    }
+
+    #[cfg(any(not(windows), feature = "moz_memory"))]
+    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+        free(ptr as *mut c_void)
+    }
 
-        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
-            if need_memalign(layout) {
-                let ptr = self.alloc(layout);
-                if !ptr.is_null() {
-                    std::ptr::write_bytes(ptr, 0, layout.size());
-                }
-                ptr
-            } else {
-                calloc(1, layout.size()) as *mut u8
+    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+        if need_memalign(layout) {
+            let ptr = self.alloc(layout);
+            if !ptr.is_null() {
+                std::ptr::write_bytes(ptr, 0, layout.size());
             }
+            ptr
+        } else {
+            calloc(1, layout.size()) as *mut u8
         }
+    }
 
-        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
-            let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
-            if need_memalign(new_layout) {
-                let new_ptr = self.alloc(new_layout);
-                if !new_ptr.is_null() {
-                    let size = std::cmp::min(layout.size(), new_size);
-                    std::ptr::copy_nonoverlapping(ptr, new_ptr, size);
-                    self.dealloc(ptr, layout);
-                }
-                new_ptr
-            } else {
-                realloc(ptr as *mut c_void, new_size) as *mut u8
+    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+        if need_memalign(new_layout) {
+            let new_ptr = self.alloc(new_layout);
+            if !new_ptr.is_null() {
+                let size = std::cmp::min(layout.size(), new_size);
+                std::ptr::copy_nonoverlapping(ptr, new_ptr, size);
+                self.dealloc(ptr, layout);
             }
+            new_ptr
+        } else {
+            realloc(ptr as *mut c_void, new_size) as *mut u8
         }
     }
 }
 
-#[cfg(feature = "moz_memory")]
 #[global_allocator]
-static A: moz_memory::GeckoAlloc = moz_memory::GeckoAlloc;
+static A: GeckoAlloc = GeckoAlloc;
-- 
cgit v1.2.3
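Note on the lib.rs change above: GeckoAlloc is now compiled unconditionally, so dealloc has to account for builds where mozjemalloc is not the backing allocator. On Windows without mozjemalloc, memory obtained from _aligned_malloc must be released with _aligned_free rather than free, which is why dealloc is split by cfg. Below is a minimal standalone sketch of the need_memalign rule the patch uses to pick between the two paths; the Layout values are illustrative examples, not taken from the patch.

use std::alloc::Layout;

// Same rule as the patch: mozjemalloc guarantees 16-byte alignment for its
// size classes at or above 16, so an aligned-allocation path (memalign or
// _aligned_malloc) is only needed when the requested alignment exceeds the
// size (small size classes guarantee less) or exceeds 16.
fn need_memalign(layout: Layout) -> bool {
    layout.align() > layout.size() || layout.align() > 16
}

fn main() {
    // 64 bytes with 8-byte alignment: plain malloc/free path.
    let ordinary = Layout::from_size_align(64, 8).unwrap();
    // 64 bytes with 64-byte alignment: aligned path; on Windows builds
    // without mozjemalloc this memory must be freed with _aligned_free.
    let overaligned = Layout::from_size_align(64, 64).unwrap();

    assert!(!need_memalign(ordinary));
    assert!(need_memalign(overaligned));
}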