From 6bf0a5cb5034a7e684dcc3500e841785237ce2dd Mon Sep 17 00:00:00 2001
From: Daniel Baumann <daniel.baumann@progress-linux.org>
Date: Sun, 7 Apr 2024 19:32:43 +0200
Subject: Adding upstream version 1:115.7.0.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
---
 ...evert-llvmorg-14-init-11890-gf86deb18cab6.patch | 173 +++++++++++++++++++++
 1 file changed, 173 insertions(+)
 create mode 100644 build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6.patch

diff --git a/build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6.patch b/build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6.patch
new file mode 100644
index 0000000000..9d61910705
--- /dev/null
+++ b/build/build-clang/revert-llvmorg-14-init-11890-gf86deb18cab6.patch
@@ -0,0 +1,173 @@
+From c8a5013045b5aff8e45418925688ca670545980f Mon Sep 17 00:00:00 2001
+From: Mike Hommey <mh@glandium.org>
+Date: Fri, 18 Mar 2022 17:58:28 +0900
+Subject: [PATCH] Revert "[lsan] Move out suppression of invalid PCs from
+ StopTheWorld"
+
+This reverts commit f86deb18cab6479a0961ade3807e4729f3a27bdf
+because of permafail for a sizable amount of ASan test jobs, where the
+worker would die without even leaving any logs.
+---
+ compiler-rt/lib/lsan/lsan_common.cpp | 108 +++++++++++++++++----------
+ 1 file changed, 67 insertions(+), 41 deletions(-)
+
+diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
+index fd7aa38d99db..658415bce507 100644
+--- a/compiler-rt/lib/lsan/lsan_common.cpp
++++ b/compiler-rt/lib/lsan/lsan_common.cpp
+@@ -71,11 +71,9 @@ class LeakSuppressionContext {
+   SuppressionContext context;
+   bool suppressed_stacks_sorted = true;
+   InternalMmapVector<u32> suppressed_stacks;
+-  const LoadedModule *suppress_module = nullptr;
+
+-  void LazyInit();
+   Suppression *GetSuppressionForAddr(uptr addr);
+-  bool SuppressInvalid(const StackTrace &stack);
++  void LazyInit();
+   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
+
+  public:
+@@ -126,8 +124,6 @@ void LeakSuppressionContext::LazyInit() {
+     if (&__lsan_default_suppressions)
+       context.Parse(__lsan_default_suppressions());
+     context.Parse(kStdSuppressions);
+-    if (flags()->use_tls && flags()->use_ld_allocations)
+-      suppress_module = GetLinker();
+   }
+ }
+
+@@ -152,41 +148,6 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
+   return s;
+ }
+
+-static uptr GetCallerPC(const StackTrace &stack) {
+-  // The top frame is our malloc/calloc/etc. The next frame is the caller.
+-  if (stack.size >= 2)
+-    return stack.trace[1];
+-  return 0;
+-}
+-
+-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+-// modules accounting etc.
+-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+-// They are allocated with a __libc_memalign() call in allocate_and_init()
+-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+-// blocks, but we can make sure they come from our own allocator by intercepting
+-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+-// addresses are stored in a dynamically allocated array (the DTV) which is
+-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+-// being reachable from the static TLS, and the dynamic TLS being reachable from
+-// the DTV. This is because the initial DTV is allocated before our interception
+-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+-// can't special-case it either, since we don't know its size.
+-// Our solution is to include in the root set all allocations made from
+-// ld-linux.so (which is where allocate_and_init() is implemented). This is
+-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+-// which we don't care about).
+-// On all other platforms, this simply checks to ensure that the caller pc is
+-// valid before reporting chunks as leaked.
+-bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+-  uptr caller_pc = GetCallerPC(stack);
+-  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+-  // it as reachable, as we can't properly report its allocation stack anyway.
+-  return !caller_pc ||
+-         (suppress_module && suppress_module->containsAddress(caller_pc));
+-}
+-
+ bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
+                                             uptr hit_count, uptr total_size) {
+   for (uptr i = 0; i < stack.size; i++) {
+@@ -205,7 +166,7 @@ bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
+                                       uptr total_size) {
+   LazyInit();
+   StackTrace stack = StackDepotGet(stack_trace_id);
+-  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
++  if (!SuppressByRule(stack, hit_count, total_size))
+     return false;
+   suppressed_stacks_sorted = false;
+   suppressed_stacks.push_back(stack_trace_id);
+@@ -569,6 +530,68 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
+   }
+ }
+
++static uptr GetCallerPC(const StackTrace &stack) {
++  // The top frame is our malloc/calloc/etc. The next frame is the caller.
++  if (stack.size >= 2)
++    return stack.trace[1];
++  return 0;
++}
++
++struct InvalidPCParam {
++  Frontier *frontier;
++  bool skip_linker_allocations;
++};
++
++// ForEachChunk callback. If the caller pc is invalid or is within the linker,
++// mark as reachable. Called by ProcessPlatformSpecificAllocations.
++static void MarkInvalidPCCb(uptr chunk, void *arg) {
++  CHECK(arg);
++  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
++  chunk = GetUserBegin(chunk);
++  LsanMetadata m(chunk);
++  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
++    u32 stack_id = m.stack_trace_id();
++    uptr caller_pc = 0;
++    if (stack_id > 0)
++      caller_pc = GetCallerPC(StackDepotGet(stack_id));
++    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
++    // it as reachable, as we can't properly report its allocation stack anyway.
++    if (caller_pc == 0 || (param->skip_linker_allocations &&
++                           GetLinker()->containsAddress(caller_pc))) {
++      m.set_tag(kIgnored);
++      param->frontier->push_back(chunk);
++    }
++  }
++}
++
++// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
++// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
++// modules accounting etc.
++// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
++// They are allocated with a __libc_memalign() call in allocate_and_init()
++// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
++// blocks, but we can make sure they come from our own allocator by intercepting
++// __libc_memalign(). On top of that, there is no easy way to reach them. Their
++// addresses are stored in a dynamically allocated array (the DTV) which is
++// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
++// being reachable from the static TLS, and the dynamic TLS being reachable from
++// the DTV. This is because the initial DTV is allocated before our interception
++// mechanism kicks in, and thus we don't recognize it as allocated memory. We
++// can't special-case it either, since we don't know its size.
++// Our solution is to include in the root set all allocations made from
++// ld-linux.so (which is where allocate_and_init() is implemented). This is
++// guaranteed to include all dynamic TLS blocks (and possibly other allocations
++// which we don't care about).
++// On all other platforms, this simply checks to ensure that the caller pc is
++// valid before reporting chunks as leaked.
++static void ProcessPC(Frontier *frontier) {
++  InvalidPCParam arg;
++  arg.frontier = frontier;
++  arg.skip_linker_allocations =
++      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
++  ForEachChunk(MarkInvalidPCCb, &arg);
++}
++
+ // Sets the appropriate tag on each chunk.
+ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+                               Frontier *frontier) {
+@@ -584,6 +607,9 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+   ProcessRootRegions(frontier);
+   FloodFillTag(frontier, kReachable);
+
++  CHECK_EQ(0, frontier->size());
++  ProcessPC(frontier);
++
+   // The check here is relatively expensive, so we do this in a separate flood
+   // fill. That way we can skip the check for chunks that are reachable
+   // otherwise.
+--
+2.35.0.1.g829a698654
+
--
cgit v1.2.3
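Note for readers of the revert: the logic being moved back into the leak-classification pass boils down to one predicate over a chunk's allocation stack, as implemented by GetCallerPC and MarkInvalidPCCb above. The following standalone C++ sketch illustrates only that predicate; it is not compiler-rt code, and FakeModule, ShouldSuppress, and the example addresses are invented stand-ins for the real LoadedModule/GetLinker() machinery.

// Minimal sketch of the suppression rule reinstated by this revert:
// a chunk is treated as reachable when its allocation stack has no usable
// caller PC, or when that PC falls inside the dynamic linker's mapping.
#include <cstdint>
#include <cstdio>
#include <vector>

using uptr = std::uintptr_t;

// Stand-in for a loaded module's address range (e.g. ld-linux.so).
struct FakeModule {
  uptr base, size;
  bool containsAddress(uptr pc) const { return pc >= base && pc < base + size; }
};

// The top frame is the allocator entry point (malloc/calloc/...);
// the next frame is the caller, mirroring GetCallerPC in the patch.
static uptr GetCallerPC(const std::vector<uptr> &stack) {
  return stack.size() >= 2 ? stack[1] : 0;
}

// Mirrors the decision in MarkInvalidPCCb: unknown caller (possibly a
// coroutine) or a caller inside the linker means the chunk is not reported.
static bool ShouldSuppress(const std::vector<uptr> &stack,
                           const FakeModule *linker) {
  uptr caller_pc = GetCallerPC(stack);
  return caller_pc == 0 || (linker && linker->containsAddress(caller_pc));
}

int main() {
  FakeModule linker{0x7f0000000000, 0x20000};  // pretend ld-linux.so mapping
  std::vector<uptr> from_linker = {0x401000, 0x7f0000001234};  // caller in linker
  std::vector<uptr> from_app = {0x401000, 0x402000};           // ordinary caller
  std::vector<uptr> truncated = {0x401000};                    // no caller frame
  std::printf("linker alloc suppressed:   %d\n", ShouldSuppress(from_linker, &linker));
  std::printf("app alloc suppressed:      %d\n", ShouldSuppress(from_app, &linker));
  std::printf("unknown caller suppressed: %d\n", ShouldSuppress(truncated, &linker));
  return 0;
}

The design point the comment block makes, and the reason the revert matters operationally, is where this predicate runs: evaluating it inside the stop-the-world phase (as restored here) rather than at report time avoids the crash-without-logs failures mentioned in the commit message.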