author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch
parent    Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch')
-rw-r--r--    build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch    180
1 file changed, 180 insertions, 0 deletions
diff --git a/build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch b/build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch
new file mode 100644
index 0000000000..f5493527ab
--- /dev/null
+++ b/build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch
@@ -0,0 +1,180 @@
+From c8a5013045b5aff8e45418925688ca670545980f Mon Sep 17 00:00:00 2001
+From: Mike Hommey <mh@glandium.org>
+Date: Fri, 18 Mar 2022 17:58:28 +0900
+Subject: [PATCH] Revert "[lsan] Move out suppression of invalid PCs from
+ StopTheWorld"
+
+This reverts commit f86deb18cab6479a0961ade3807e4729f3a27bdf
+because of a permafail in a sizable number of ASan test jobs, where the
+worker would die without even leaving any logs.
+
+---
+ compiler-rt/lib/lsan/lsan_common.cpp | 108 +++++++++++++++++----------
+ 1 file changed, 67 insertions(+), 41 deletions(-)
+
+diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
+index 51218770d6dc..0a69b010879b 100644
+--- a/compiler-rt/lib/lsan/lsan_common.cpp
++++ b/compiler-rt/lib/lsan/lsan_common.cpp
+@@ -83,11 +83,9 @@ class LeakSuppressionContext {
+ SuppressionContext context;
+ bool suppressed_stacks_sorted = true;
+ InternalMmapVector<u32> suppressed_stacks;
+- const LoadedModule *suppress_module = nullptr;
+
+- void LazyInit();
+ Suppression *GetSuppressionForAddr(uptr addr);
+- bool SuppressInvalid(const StackTrace &stack);
++ void LazyInit();
+ bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
+
+ public:
+@@ -138,8 +136,6 @@ void LeakSuppressionContext::LazyInit() {
+ if (&__lsan_default_suppressions)
+ context.Parse(__lsan_default_suppressions());
+ context.Parse(kStdSuppressions);
+- if (flags()->use_tls && flags()->use_ld_allocations)
+- suppress_module = GetLinker();
+ }
+ }
+
+@@ -165,13 +161,6 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
+ return s;
+ }
+
+-static uptr GetCallerPC(const StackTrace &stack) {
+- // The top frame is our malloc/calloc/etc. The next frame is the caller.
+- if (stack.size >= 2)
+- return stack.trace[1];
+- return 0;
+-}
+-
+ # if SANITIZER_APPLE
+ // Objective-C class data pointers are stored with flags in the low bits, so
+ // they need to be transformed back into something that looks like a pointer.
+@@ -183,34 +172,6 @@ static inline void *MaybeTransformPointer(void *p) {
+ }
+ # endif
+
+-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+-// modules accounting etc.
+-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+-// They are allocated with a __libc_memalign() call in allocate_and_init()
+-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+-// blocks, but we can make sure they come from our own allocator by intercepting
+-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+-// addresses are stored in a dynamically allocated array (the DTV) which is
+-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+-// being reachable from the static TLS, and the dynamic TLS being reachable from
+-// the DTV. This is because the initial DTV is allocated before our interception
+-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+-// can't special-case it either, since we don't know its size.
+-// Our solution is to include in the root set all allocations made from
+-// ld-linux.so (which is where allocate_and_init() is implemented). This is
+-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+-// which we don't care about).
+-// On all other platforms, this simply checks to ensure that the caller pc is
+-// valid before reporting chunks as leaked.
+-bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+- uptr caller_pc = GetCallerPC(stack);
+- // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+- // it as reachable, as we can't properly report its allocation stack anyway.
+- return !caller_pc ||
+- (suppress_module && suppress_module->containsAddress(caller_pc));
+-}
+-
+ bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
+ uptr hit_count, uptr total_size) {
+ for (uptr i = 0; i < stack.size; i++) {
+@@ -229,7 +190,7 @@ bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
+ uptr total_size) {
+ LazyInit();
+ StackTrace stack = StackDepotGet(stack_trace_id);
+- if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
++ if (!SuppressByRule(stack, hit_count, total_size))
+ return false;
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+@@ -600,6 +561,68 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
+ }
+ }
+
++static uptr GetCallerPC(const StackTrace &stack) {
++ // The top frame is our malloc/calloc/etc. The next frame is the caller.
++ if (stack.size >= 2)
++ return stack.trace[1];
++ return 0;
++}
++
++struct InvalidPCParam {
++ Frontier *frontier;
++ bool skip_linker_allocations;
++};
++
++// ForEachChunk callback. If the caller pc is invalid or is within the linker,
++// mark as reachable. Called by ProcessPlatformSpecificAllocations.
++static void MarkInvalidPCCb(uptr chunk, void *arg) {
++ CHECK(arg);
++ InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
++ chunk = GetUserBegin(chunk);
++ LsanMetadata m(chunk);
++ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
++ u32 stack_id = m.stack_trace_id();
++ uptr caller_pc = 0;
++ if (stack_id > 0)
++ caller_pc = GetCallerPC(StackDepotGet(stack_id));
++ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
++ // it as reachable, as we can't properly report its allocation stack anyway.
++ if (caller_pc == 0 || (param->skip_linker_allocations &&
++ GetLinker()->containsAddress(caller_pc))) {
++ m.set_tag(kIgnored);
++ param->frontier->push_back(chunk);
++ }
++ }
++}
++
++// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
++// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
++// modules accounting etc.
++// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
++// They are allocated with a __libc_memalign() call in allocate_and_init()
++// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
++// blocks, but we can make sure they come from our own allocator by intercepting
++// __libc_memalign(). On top of that, there is no easy way to reach them. Their
++// addresses are stored in a dynamically allocated array (the DTV) which is
++// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
++// being reachable from the static TLS, and the dynamic TLS being reachable from
++// the DTV. This is because the initial DTV is allocated before our interception
++// mechanism kicks in, and thus we don't recognize it as allocated memory. We
++// can't special-case it either, since we don't know its size.
++// Our solution is to include in the root set all allocations made from
++// ld-linux.so (which is where allocate_and_init() is implemented). This is
++// guaranteed to include all dynamic TLS blocks (and possibly other allocations
++// which we don't care about).
++// On all other platforms, this simply checks to ensure that the caller pc is
++// valid before reporting chunks as leaked.
++static void ProcessPC(Frontier *frontier) {
++ InvalidPCParam arg;
++ arg.frontier = frontier;
++ arg.skip_linker_allocations =
++ flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
++ ForEachChunk(MarkInvalidPCCb, &arg);
++}
++
+ // Sets the appropriate tag on each chunk.
+ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+ Frontier *frontier, tid_t caller_tid,
+@@ -616,6 +639,9 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+ ProcessRootRegions(frontier);
+ FloodFillTag(frontier, kReachable);
+
++ CHECK_EQ(0, frontier->size());
++ ProcessPC(frontier);
++
+ // The check here is relatively expensive, so we do this in a separate flood
+ // fill. That way we can skip the check for chunks that are reachable
+ // otherwise.
+--
+2.35.0.1.g829a698654
+
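For context on the behaviour this revert restores, below is a minimal, self-contained C++ sketch of the caller-PC heuristic that moves back into ProcessPC/MarkInvalidPCCb: frame 0 of an allocation stack is the malloc/calloc interceptor, frame 1 is the caller, and a zero or linker-owned caller PC makes LSan treat the chunk as reachable (tagged kIgnored) instead of reporting it as a leak. This is not the sanitizer code itself; the StackTrace struct, the LinkerContains() helper, and all addresses are made up for illustration and assume a 64-bit address space.

#include <cstdint>
#include <iostream>
#include <vector>

using uptr = std::uintptr_t;

struct StackTrace {
  std::vector<uptr> trace;  // trace[0] = allocator frame, trace[1] = caller
};

static uptr GetCallerPC(const StackTrace &stack) {
  // Mirrors the patch's GetCallerPC: the top frame is malloc/calloc itself,
  // so the caller is the next frame; 0 means the caller PC is unknown.
  return stack.trace.size() >= 2 ? stack.trace[1] : 0;
}

// Stand-in for GetLinker()->containsAddress(): does this PC fall inside the
// (made-up) address range occupied by ld-linux.so?
static bool LinkerContains(uptr pc, uptr lo, uptr hi) {
  return pc >= lo && pc < hi;
}

int main() {
  const uptr linker_lo = 0x7f0000000000, linker_hi = 0x7f0000100000;  // fake range

  StackTrace from_user{{0x401000, 0x402345}};          // ordinary caller
  StackTrace from_linker{{0x401000, 0x7f0000000040}};  // caller inside ld-linux.so
  StackTrace unknown{{0x401000}};                      // truncated stack, e.g. a coroutine

  for (const StackTrace *s : {&from_user, &from_linker, &unknown}) {
    uptr pc = GetCallerPC(*s);
    bool keep_out_of_leak_report =
        pc == 0 || LinkerContains(pc, linker_lo, linker_hi);
    std::cout << std::hex << pc << " -> "
              << (keep_out_of_leak_report ? "mark reachable/ignored"
                                          : "run normal leak checks")
              << '\n';
  }
  return 0;
}

The difference the revert undoes is only *where* this check runs: upstream had moved it into LeakSuppressionContext::Suppress (outside StopTheWorld), while the restored code runs it as a ForEachChunk pass (ProcessPC) during ClassifyAllChunks.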