author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /memory/build/test
parent     Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'memory/build/test')
-rw-r--r--  memory/build/test/TestMozJemallocUtils.cpp | 152
-rw-r--r--  memory/build/test/gtest/TestPHC.cpp        | 415
-rw-r--r--  memory/build/test/gtest/moz.build          |  15
-rw-r--r--  memory/build/test/moz.build                |  21
4 files changed, 603 insertions, 0 deletions
diff --git a/memory/build/test/TestMozJemallocUtils.cpp b/memory/build/test/TestMozJemallocUtils.cpp
new file mode 100644
index 0000000000..06a1a3b5a5
--- /dev/null
+++ b/memory/build/test/TestMozJemallocUtils.cpp
@@ -0,0 +1,152 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This is a cppunittest, rather than a gtest, in order to assert that no
+// additional DLL needs to be linked in to use the function(s) tested herein.
+
+#include <algorithm>
+#include <iostream>
+#include <optional>
+#include <sstream>
+#include <type_traits>
+
+#include "mozmemory_utils.h"
+#include "mozilla/Likely.h"
+
+static bool TESTS_FAILED = false;
+
+// Introduce iostream output operators for std::optional, for convenience's
+// sake.
+//
+// (This is technically undefined behavior per [namespace.std], but it's
+// unlikely to have any surprising effects when confined to this compilation
+// unit.)
+namespace std {
+template <typename T>
+std::ostream& operator<<(std::ostream& o, std::optional<T> const& s) {
+ if (s) {
+ return o << "std::optional{" << s.value() << "}";
+ }
+ return o << "std::nullopt";
+}
+std::ostream& operator<<(std::ostream& o, std::nullopt_t const& s) {
+ return o << "std::nullopt";
+}
+} // namespace std
+
+// EXPECT_EQ
+//
+// Assert that two expressions are equal. Print them, and their values, on
+// failure. (Based on the GTest macro of the same name.)
+template <typename X, typename Y, size_t Xn, size_t Yn>
+void AssertEqualImpl_(X&& x, Y&& y, const char* file, size_t line,
+ const char (&xStr)[Xn], const char (&yStr)[Yn],
+ const char* explanation = nullptr) {
+ if (MOZ_LIKELY(x == y)) return;
+
+ TESTS_FAILED = true;
+
+ std::stringstream sstr;
+ sstr << file << ':' << line << ": ";
+ if (explanation) sstr << explanation << "\n\t";
+ sstr << "expected " << xStr << " (" << x << ") == " << yStr << " (" << y
+ << ")\n";
+ std::cerr << sstr.str() << std::flush;
+}
+
+#define EXPECT_EQ(x, y) \
+ do { \
+ AssertEqualImpl_(x, y, __FILE__, __LINE__, #x, #y); \
+ } while (0)
+
+// STATIC_ASSERT_VALUE_IS_OF_TYPE
+//
+// Assert that a value `v` is of type `t` (ignoring cv-qualification).
+#define STATIC_ASSERT_VALUE_IS_OF_TYPE(v, t) \
+ static_assert(std::is_same_v<std::remove_cv_t<decltype(v)>, t>)
+
+// MockSleep
+//
+// Mock replacement for ::Sleep that merely logs its calls.
+struct MockSleep {
+ size_t calls = 0;
+ size_t sum = 0;
+
+ void operator()(size_t val) {
+ ++calls;
+ sum += val;
+ }
+
+ bool operator==(MockSleep const& that) const {
+ return calls == that.calls && sum == that.sum;
+ }
+};
+std::ostream& operator<<(std::ostream& o, MockSleep const& s) {
+ return o << "MockSleep { count: " << s.calls << ", sum: " << s.sum << " }";
+}
+
+// MockAlloc
+//
+// Mock allocation mechanism: fails `count` times, then yields `value`.
+template <typename T>
+struct MockAlloc {
+ size_t count;
+ T value;
+
+ std::optional<T> operator()() {
+ if (!count--) return value;
+ return std::nullopt;
+ }
+};
+
+int main() {
+ using mozilla::StallSpecs;
+
+ const StallSpecs stall = {.maxAttempts = 10, .delayMs = 50};
+
+ // semantic test: stalls as requested but still yields a value,
+ // up until it doesn't
+ for (size_t i = 0; i < 20; ++i) {
+ MockSleep sleep;
+ auto const ret =
+ stall.StallAndRetry(sleep, MockAlloc<int>{.count = i, .value = 5});
+ STATIC_ASSERT_VALUE_IS_OF_TYPE(ret, std::optional<int>);
+
+ if (i < 10) {
+ EXPECT_EQ(ret, std::optional<int>(5));
+ } else {
+ EXPECT_EQ(ret, std::nullopt);
+ }
+ size_t const expectedCalls = std::min<size_t>(i + 1, 10);
+ EXPECT_EQ(sleep,
+ (MockSleep{.calls = expectedCalls, .sum = 50 * expectedCalls}));
+ }
+
+ // syntactic test: inline capturing lambda is accepted for aOperation
+ {
+ MockSleep sleep;
+ std::optional<int> value{42};
+ auto const ret = stall.StallAndRetry(sleep, [&]() { return value; });
+
+ STATIC_ASSERT_VALUE_IS_OF_TYPE(ret, std::optional<int>);
+ EXPECT_EQ(ret, std::optional(42));
+ EXPECT_EQ(sleep, (MockSleep{.calls = 1, .sum = 50}));
+ }
+
+ // syntactic test: inline capturing lambda is accepted for aDelayFunc
+ {
+ MockSleep sleep;
+ auto const ret =
+ stall.StallAndRetry([&](size_t time) { sleep(time); },
+ MockAlloc<int>{.count = 0, .value = 105});
+
+ STATIC_ASSERT_VALUE_IS_OF_TYPE(ret, std::optional<int>);
+ EXPECT_EQ(ret, std::optional(105));
+ EXPECT_EQ(sleep, (MockSleep{.calls = 1, .sum = 50}));
+ }
+
+ return TESTS_FAILED ? 1 : 0;
+}
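
The cppunittest above only pins down the observable contract of StallSpecs::StallAndRetry: one delay call accompanies every attempt, the first engaged optional returned by the operation wins, and std::nullopt comes back after maxAttempts failed attempts. As a minimal, self-contained sketch of what those assertions imply (an illustration only, not the mozmemory_utils.h implementation; the name StallSpecsSketch is invented here):

    #include <cstddef>
    #include <optional>

    struct StallSpecsSketch {
      std::size_t maxAttempts;
      std::size_t delayMs;

      // Retry aOperation up to maxAttempts times, calling aDelayFunc(delayMs)
      // before each attempt; this reproduces the call/sum counts asserted
      // above (e.g. one delay call even when the first attempt succeeds).
      template <typename DelayFunc, typename Operation>
      auto StallAndRetry(DelayFunc&& aDelayFunc, Operation&& aOperation) const
          -> decltype(aOperation()) {
        for (std::size_t attempt = 0; attempt < maxAttempts; ++attempt) {
          aDelayFunc(delayMs);
          if (auto result = aOperation()) {
            return result;
          }
        }
        return std::nullopt;
      }
    };

With this shape, a StallSpecsSketch{10, 50} driven by the MockSleep/MockAlloc pair yields exactly the sleep counts and results checked in the loop above.
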
diff --git a/memory/build/test/gtest/TestPHC.cpp b/memory/build/test/gtest/TestPHC.cpp
new file mode 100644
index 0000000000..58d72631cb
--- /dev/null
+++ b/memory/build/test/gtest/TestPHC.cpp
@@ -0,0 +1,415 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozmemory.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/mozalloc.h"
+#include "PHC.h"
+
+using namespace mozilla;
+
+bool PHCInfoEq(phc::AddrInfo& aInfo, phc::AddrInfo::Kind aKind, void* aBaseAddr,
+ size_t aUsableSize, bool aHasAllocStack, bool aHasFreeStack) {
+ return aInfo.mKind == aKind && aInfo.mBaseAddr == aBaseAddr &&
+ aInfo.mUsableSize == aUsableSize &&
+ // Proper stack traces will have at least 3 elements.
+ (aHasAllocStack ? (aInfo.mAllocStack->mLength > 2)
+ : (aInfo.mAllocStack.isNothing())) &&
+ (aHasFreeStack ? (aInfo.mFreeStack->mLength > 2)
+ : (aInfo.mFreeStack.isNothing()));
+}
+
+bool JeInfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
+ size_t aSize, arena_id_t arenaId) {
+ return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
+#ifdef MOZ_DEBUG
+ && aInfo.arenaId == arenaId
+#endif
+ ;
+}
+
+uint8_t* GetPHCAllocation(size_t aSize, size_t aAlignment = 1) {
+ // A crude but effective way to get a PHC allocation.
+ for (int i = 0; i < 2000000; i++) {
+ void* p = (aAlignment == 1) ? moz_xmalloc(aSize)
+ : moz_xmemalign(aAlignment, aSize);
+ if (mozilla::phc::IsPHCAllocation(p, nullptr)) {
+ return (uint8_t*)p;
+ }
+ free(p);
+ }
+ return nullptr;
+}
+
+#if defined(XP_DARWIN) && defined(__aarch64__)
+static const size_t kPageSize = 16384;
+#else
+static const size_t kPageSize = 4096;
+#endif
+
+TEST(PHC, TestPHCAllocations)
+{
+ mozilla::phc::SetPHCState(phc::PHCState::Enabled);
+
+ // First, check that allocations of various sizes all get put at the end of
+ // their page as expected. Also, check their sizes are as expected.
+
+#define ASSERT_POS(n1, n2) \
+ p = (uint8_t*)moz_xrealloc(p, (n1)); \
+ ASSERT_EQ((reinterpret_cast<uintptr_t>(p) & (kPageSize - 1)), \
+ kPageSize - (n2)); \
+ ASSERT_EQ(moz_malloc_usable_size(p), (n2));
+
+ uint8_t* p = GetPHCAllocation(1);
+ if (!p) {
+ MOZ_CRASH("failed to get a PHC allocation");
+ }
+
+ // On Win64 the smallest possible allocation is 16 bytes. On other platforms
+ // it is 8 bytes.
+#if defined(XP_WIN) && defined(HAVE_64BIT_BUILD)
+ ASSERT_POS(8U, 16U);
+#else
+ ASSERT_POS(8U, 8U);
+#endif
+ for (unsigned i = 16; i <= kPageSize; i *= 2) {
+ ASSERT_POS(i, i);
+ }
+
+ free(p);
+
+#undef ASSERT_POS
+
+  // Second, do similar checking with allocations of various alignments. Also
+  // check that their sizes (which differ from those of normally-aligned
+  // allocations) are the same as the sizes of equivalent non-PHC allocations.
+
+#define ASSERT_ALIGN(a1, a2) \
+ p = (uint8_t*)GetPHCAllocation(8, (a1)); \
+ ASSERT_EQ((reinterpret_cast<uintptr_t>(p) & (kPageSize - 1)), \
+ kPageSize - (a2)); \
+ ASSERT_EQ(moz_malloc_usable_size(p), (a2)); \
+ free(p); \
+ p = (uint8_t*)moz_xmemalign((a1), 8); \
+ ASSERT_EQ(moz_malloc_usable_size(p), (a2)); \
+ free(p);
+
+ // On Win64 the smallest possible allocation is 16 bytes. On other platforms
+ // it is 8 bytes.
+#if defined(XP_WIN) && defined(HAVE_64BIT_BUILD)
+ ASSERT_ALIGN(8U, 16U);
+#else
+ ASSERT_ALIGN(8U, 8U);
+#endif
+ for (unsigned i = 16; i <= kPageSize; i *= 2) {
+ ASSERT_ALIGN(i, i);
+ }
+
+#undef ASSERT_ALIGN
+}
+
+static void TestInUseAllocation(uint8_t* aPtr, size_t aSize) {
+ phc::AddrInfo phcInfo;
+ jemalloc_ptr_info_t jeInfo;
+
+ // Test an in-use PHC allocation: first byte within it.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, aPtr, aSize,
+ true, false));
+ ASSERT_EQ(moz_malloc_usable_size(aPtr), aSize);
+ jemalloc_ptr_info(aPtr, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagLiveAlloc, aPtr, aSize, 0));
+
+ // Test an in-use PHC allocation: last byte within it.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr + aSize - 1, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, aPtr, aSize,
+ true, false));
+ ASSERT_EQ(moz_malloc_usable_size(aPtr + aSize - 1), aSize);
+ jemalloc_ptr_info(aPtr + aSize - 1, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagLiveAlloc, aPtr, aSize, 0));
+
+ // Test an in-use PHC allocation: last byte before it.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr - 1, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, aPtr, aSize,
+ true, false));
+ ASSERT_EQ(moz_malloc_usable_size(aPtr - 1), 0ul);
+ jemalloc_ptr_info(aPtr - 1, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test an in-use PHC allocation: first byte on its allocation page.
+ ASSERT_TRUE(
+ mozilla::phc::IsPHCAllocation(aPtr + aSize - kPageSize, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, aPtr, aSize,
+ true, false));
+ jemalloc_ptr_info(aPtr + aSize - kPageSize, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test an in-use PHC allocation: first byte in the following guard page.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr + aSize, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, false));
+ jemalloc_ptr_info(aPtr + aSize, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test an in-use PHC allocation: last byte in the lower half of the
+ // following guard page.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr + aSize + (kPageSize / 2 - 1),
+ &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, false));
+ jemalloc_ptr_info(aPtr + aSize + (kPageSize / 2 - 1), &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test an in-use PHC allocation: last byte in the preceding guard page.
+ ASSERT_TRUE(
+ mozilla::phc::IsPHCAllocation(aPtr + aSize - 1 - kPageSize, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, false));
+ jemalloc_ptr_info(aPtr + aSize - 1 - kPageSize, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test an in-use PHC allocation: first byte in the upper half of the
+ // preceding guard page.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(
+ aPtr + aSize - 1 - kPageSize - (kPageSize / 2 - 1), &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, false));
+ jemalloc_ptr_info(aPtr + aSize - 1 - kPageSize - (kPageSize / 2 - 1),
+ &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+}
+
+static void TestFreedAllocation(uint8_t* aPtr, size_t aSize) {
+ phc::AddrInfo phcInfo;
+ jemalloc_ptr_info_t jeInfo;
+
+ // Test a freed PHC allocation: first byte within it.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagFreedAlloc, aPtr, aSize, 0));
+
+ // Test a freed PHC allocation: last byte within it.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr + aSize - 1, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr + aSize - 1, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagFreedAlloc, aPtr, aSize, 0));
+
+ // Test a freed PHC allocation: last byte before it.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr - 1, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr - 1, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test a freed PHC allocation: first byte on its allocation page.
+ ASSERT_TRUE(
+ mozilla::phc::IsPHCAllocation(aPtr + aSize - kPageSize, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr + aSize - kPageSize, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test a freed PHC allocation: first byte in the following guard page.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr + aSize, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr + aSize, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test a freed PHC allocation: last byte in the lower half of the following
+ // guard page.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(aPtr + aSize + (kPageSize / 2 - 1),
+ &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr + aSize + (kPageSize / 2 - 1), &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test a freed PHC allocation: last byte in the preceding guard page.
+ ASSERT_TRUE(
+ mozilla::phc::IsPHCAllocation(aPtr + aSize - 1 - kPageSize, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr + aSize - 1 - kPageSize, &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+ // Test a freed PHC allocation: first byte in the upper half of the preceding
+ // guard page.
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(
+ aPtr + aSize - 1 - kPageSize - (kPageSize / 2 - 1), &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::GuardPage, aPtr, aSize,
+ true, true));
+ jemalloc_ptr_info(aPtr + aSize - 1 - kPageSize - (kPageSize / 2 - 1),
+ &jeInfo);
+ ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+}
+
+TEST(PHC, TestPHCInfo)
+{
+ mozilla::phc::SetPHCState(phc::PHCState::Enabled);
+
+ int stackVar;
+ phc::AddrInfo phcInfo;
+
+ // Test a default AddrInfo.
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0ul,
+ false, false));
+
+ // Test some non-PHC allocation addresses.
+ ASSERT_FALSE(mozilla::phc::IsPHCAllocation(nullptr, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0,
+ false, false));
+ ASSERT_FALSE(mozilla::phc::IsPHCAllocation(&stackVar, &phcInfo));
+ ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0,
+ false, false));
+
+ uint8_t* p = GetPHCAllocation(32);
+ if (!p) {
+ MOZ_CRASH("failed to get a PHC allocation");
+ }
+
+ TestInUseAllocation(p, 32);
+
+ free(p);
+
+ TestFreedAllocation(p, 32);
+
+ // There are no tests for `mKind == NeverAllocatedPage` because it's not
+ // possible to reliably get ahold of such a page.
+}
+
+TEST(PHC, TestPHCDisablingThread)
+{
+ mozilla::phc::SetPHCState(phc::PHCState::Enabled);
+
+ uint8_t* p = GetPHCAllocation(32);
+ uint8_t* q = GetPHCAllocation(32);
+ if (!p || !q) {
+ MOZ_CRASH("failed to get a PHC allocation");
+ }
+
+ ASSERT_TRUE(mozilla::phc::IsPHCEnabledOnCurrentThread());
+ mozilla::phc::DisablePHCOnCurrentThread();
+ ASSERT_FALSE(mozilla::phc::IsPHCEnabledOnCurrentThread());
+
+ // Test realloc() on a PHC allocation while PHC is disabled on the thread.
+ uint8_t* p2 = (uint8_t*)realloc(p, 128);
+ // The small realloc is fulfilled within the same page, but it does move.
+ ASSERT_TRUE(p2 == p - 96);
+ ASSERT_TRUE(mozilla::phc::IsPHCAllocation(p2, nullptr));
+ uint8_t* p3 = (uint8_t*)realloc(p2, 2 * kPageSize);
+ // The big realloc is not in-place, and the result is not a PHC allocation.
+ ASSERT_TRUE(p3 != p2);
+ ASSERT_FALSE(mozilla::phc::IsPHCAllocation(p3, nullptr));
+ free(p3);
+
+ // Test free() on a PHC allocation while PHC is disabled on the thread.
+ free(q);
+
+ // These must not be PHC allocations.
+ uint8_t* r = GetPHCAllocation(32); // This will fail.
+ ASSERT_FALSE(!!r);
+
+ mozilla::phc::ReenablePHCOnCurrentThread();
+ ASSERT_TRUE(mozilla::phc::IsPHCEnabledOnCurrentThread());
+
+ // If it really was reenabled we should be able to get PHC allocations
+ // again.
+ uint8_t* s = GetPHCAllocation(32); // This should succeed.
+ ASSERT_TRUE(!!s);
+ free(s);
+}
+
+TEST(PHC, TestPHCDisablingGlobal)
+{
+ mozilla::phc::SetPHCState(phc::PHCState::Enabled);
+
+ uint8_t* p1 = GetPHCAllocation(32);
+ uint8_t* p2 = GetPHCAllocation(32);
+ uint8_t* q = GetPHCAllocation(32);
+ if (!p1 || !p2 || !q) {
+ MOZ_CRASH("failed to get a PHC allocation");
+ }
+
+ mozilla::phc::SetPHCState(phc::PHCState::OnlyFree);
+
+  // Test realloc() on a PHC allocation while PHC is in the OnlyFree state.
+ uint8_t* p3 = (uint8_t*)realloc(p1, 128);
+ // The small realloc is evicted from PHC because in "OnlyFree" state PHC
+ // tries to reduce its memory impact.
+ ASSERT_TRUE(p3 != p1);
+ ASSERT_FALSE(mozilla::phc::IsPHCAllocation(p3, nullptr));
+ free(p3);
+ uint8_t* p4 = (uint8_t*)realloc(p2, 2 * kPageSize);
+ // The big realloc is not in-place, and the result is not a PHC
+ // allocation, regardless of PHC's state.
+ ASSERT_TRUE(p4 != p2);
+ ASSERT_FALSE(mozilla::phc::IsPHCAllocation(p4, nullptr));
+ free(p4);
+
+  // Test free() on a PHC allocation while PHC is in the OnlyFree state.
+ free(q);
+
+ // These must not be PHC allocations.
+ uint8_t* r = GetPHCAllocation(32); // This will fail.
+ ASSERT_FALSE(!!r);
+
+ mozilla::phc::SetPHCState(phc::PHCState::Enabled);
+
+ // If it really was reenabled we should be able to get PHC allocations
+ // again.
+ uint8_t* s = GetPHCAllocation(32); // This should succeed.
+ ASSERT_TRUE(!!s);
+ free(s);
+}
+
+// This test is disabled for now, see Bug 1845017 and Bug 1845655.
+// TEST(PHC, TestPHCExhaustion)
+// {
+void DisabledPHCExhaustionTest() {
+ // PHC hardcodes the amount of allocations to track.
+#if defined(XP_DARWIN) && defined(__aarch64__)
+ const unsigned NUM_ALLOCATIONS = 1024;
+#else
+ const unsigned NUM_ALLOCATIONS = 4096;
+#endif
+ uint8_t* allocations[NUM_ALLOCATIONS];
+ const unsigned REQUIRED_ALLOCATIONS = NUM_ALLOCATIONS - 50;
+
+ unsigned last_allocation;
+ for (unsigned i = 0; i < NUM_ALLOCATIONS; i++) {
+ allocations[i] = GetPHCAllocation(128);
+ last_allocation = i;
+ if (i < REQUIRED_ALLOCATIONS) {
+ // Assert that the first REQUIRED_ALLOCATIONS work. We require only
+ // REQUIRED_ALLOCATIONS rather than NUM_ALLOCATIONS because sometimes
+ // some PHC slots are used before the test begins.
+ ASSERT_TRUE(allocations[i]);
+ } else if (!allocations[i]) {
+      // If an allocation fails, stop allocating and move to the next phase.
+ last_allocation--;
+ break;
+ }
+ TestInUseAllocation(allocations[i], 128);
+ }
+
+ // We should now fail to get an allocation.
+ ASSERT_FALSE(GetPHCAllocation(128));
+
+ for (unsigned i = 0; i <= last_allocation; i++) {
+ free(allocations[i]);
+ TestFreedAllocation(allocations[i], 128);
+ }
+
+ // And now that we've released those allocations we should be able to get
+ // new allocations again.
+ uint8_t* r = GetPHCAllocation(128);
+ ASSERT_TRUE(!!r);
+ free(r);
+}
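
TestPHCInfo and the helpers above exercise the classification carried by phc::AddrInfo: in-use pages, freed pages, guard pages, and the presence of allocation/free stacks. As a hedged illustration of how a caller inside the same tree might consume that API, here is a small hypothetical helper; only IsPHCAllocation, AddrInfo, and its Kind values are taken from the tests above, while the helper itself is invented for this note and is not part of the patch.

    #include "PHC.h"

    // Hypothetical: map the AddrInfo classification used by the tests above
    // to a human-readable diagnosis for an arbitrary address.
    static const char* ClassifyAddressSketch(void* aAddr) {
      mozilla::phc::AddrInfo info;
      if (!mozilla::phc::IsPHCAllocation(aAddr, &info)) {
        return "not a PHC allocation";
      }
      switch (info.mKind) {
        case mozilla::phc::AddrInfo::Kind::InUsePage:
          return "on the page of a live PHC allocation";
        case mozilla::phc::AddrInfo::Kind::FreedPage:
          return "on the page of a freed PHC allocation (use-after-free?)";
        case mozilla::phc::AddrInfo::Kind::GuardPage:
          return "in a PHC guard page (buffer overflow or underflow?)";
        default:
          return "PHC page of unknown or never-allocated kind";
      }
    }
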
diff --git a/memory/build/test/gtest/moz.build b/memory/build/test/gtest/moz.build
new file mode 100644
index 0000000000..82ccaaf9c6
--- /dev/null
+++ b/memory/build/test/gtest/moz.build
@@ -0,0 +1,15 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ "TestPHC.cpp",
+]
+
+LOCAL_INCLUDES += [
+ "../../",
+]
+
+FINAL_LIBRARY = "xul-gtest"
diff --git a/memory/build/test/moz.build b/memory/build/test/moz.build
new file mode 100644
index 0000000000..d93cc48c65
--- /dev/null
+++ b/memory/build/test/moz.build
@@ -0,0 +1,21 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# We don't link these tests against mozglue, but we do use the STL. Avoid the
+# implicit linking of `__imp_moz_xmalloc` in our STL wrappers.
+DisableStlWrapping()
+
+# Important: for these tests to be run, they also need to be added
+# to testing/cppunittest.ini.
+CppUnitTests(
+ [
+ "TestMozJemallocUtils",
+ ]
+)
+# The gtests won't work in a SpiderMonkey-only build or a build without
+# jemalloc.
+if CONFIG["MOZ_PHC"]:
+ TEST_DIRS += ["gtest"]