Diffstat (limited to 'tools/profiler/tests/gtest')
-rw-r--r-- | tools/profiler/tests/gtest/GeckoProfiler.cpp         | 5099
-rw-r--r-- | tools/profiler/tests/gtest/LulTest.cpp                |   51
-rw-r--r-- | tools/profiler/tests/gtest/LulTestDwarf.cpp           | 2733
-rw-r--r-- | tools/profiler/tests/gtest/LulTestInfrastructure.cpp  |  498
-rw-r--r-- | tools/profiler/tests/gtest/LulTestInfrastructure.h    |  736
-rw-r--r-- | tools/profiler/tests/gtest/ThreadProfileTest.cpp      |   60
-rw-r--r-- | tools/profiler/tests/gtest/moz.build                  |   45
7 files changed, 9222 insertions, 0 deletions
diff --git a/tools/profiler/tests/gtest/GeckoProfiler.cpp b/tools/profiler/tests/gtest/GeckoProfiler.cpp
new file mode 100644
index 0000000000..78456662f5
--- /dev/null
+++ b/tools/profiler/tests/gtest/GeckoProfiler.cpp
@@ -0,0 +1,5099 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file tests a lot of the profiler_*() functions in GeckoProfiler.h.
+// Most of the tests just check that nothing untoward (e.g. crashes, deadlocks)
+// happens when calling these functions. They don't do much inspection of
+// profiler internals.
+
+#include "mozilla/ProfilerThreadPlatformData.h"
+#include "mozilla/ProfilerThreadRegistration.h"
+#include "mozilla/ProfilerThreadRegistrationInfo.h"
+#include "mozilla/ProfilerThreadRegistry.h"
+#include "mozilla/ProfilerUtils.h"
+#include "mozilla/ProgressLogger.h"
+#include "mozilla/UniquePtrExtensions.h"
+
+#include "nsIThread.h"
+#include "nsThreadUtils.h"
+#include "prthread.h"
+
+#include "gtest/gtest.h"
+#include "mozilla/gtest/MozAssertions.h"
+
+#include <thread>
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#  include <processthreadsapi.h>
+#  include <realtimeapiset.h>
+#elif defined(__APPLE__)
+#  include <mach/thread_act.h>
+#endif
+
+#ifdef XP_WIN
+#include "mozilla/WindowsVersion.h"
+#endif
+
+#ifdef MOZ_GECKO_PROFILER
+
+#  include "GeckoProfiler.h"
+#  include "mozilla/ProfilerMarkerTypes.h"
+#  include "mozilla/ProfilerMarkers.h"
+#  include "NetworkMarker.h"
+#  include "platform.h"
+#  include "ProfileBuffer.h"
+#  include "ProfilerControl.h"
+
+#  include "js/Initialization.h"
+#  include "js/Printf.h"
+#  include "jsapi.h"
+#  include "json/json.h"
+#  include "mozilla/Atomics.h"
+#  include "mozilla/BlocksRingBuffer.h"
+#  include "mozilla/DataMutex.h"
+#  include "mozilla/ProfileBufferEntrySerializationGeckoExtensions.h"
+#  include "mozilla/ProfileJSONWriter.h"
+#  include "mozilla/ScopeExit.h"
+#  include "mozilla/net/HttpBaseChannel.h"
+#  include "nsIChannelEventSink.h"
+#  include "nsIThread.h"
+#  include "nsThreadUtils.h"
+
+#  include <cstring>
+#  include <set>
+
+#endif  // MOZ_GECKO_PROFILER
+
+// Note: profiler_init() has already been called in XRE_main(), so we can't
+// test it here. Likewise for profiler_shutdown(), and AutoProfilerInit
+// (which is just an RAII wrapper for profiler_init() and profiler_shutdown()).
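// (Aside, not from the patch: the note above describes AutoProfilerInit as an
// RAII wrapper around profiler_init()/profiler_shutdown(). As a reading aid,
// that pattern boils down to something like the following sketch -- the class
// name is hypothetical, and this is not the actual GeckoProfiler.h definition:)
class AutoProfilerInitSketch {
 public:
  // profiler_init() takes the address of an on-stack object as the stack top.
  explicit AutoProfilerInitSketch(void* aStackTop) { profiler_init(aStackTop); }
  ~AutoProfilerInitSketch() { profiler_shutdown(); }
  AutoProfilerInitSketch(const AutoProfilerInitSketch&) = delete;
  AutoProfilerInitSketch& operator=(const AutoProfilerInitSketch&) = delete;
};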
+ +using namespace mozilla; + +TEST(GeckoProfiler, ProfilerUtils) +{ + profiler_init_main_thread_id(); + + static_assert(std::is_same_v<decltype(profiler_current_process_id()), + ProfilerProcessId>); + static_assert( + std::is_same_v<decltype(profiler_current_process_id()), + decltype(baseprofiler::profiler_current_process_id())>); + ProfilerProcessId processId = profiler_current_process_id(); + EXPECT_TRUE(processId.IsSpecified()); + EXPECT_EQ(processId, baseprofiler::profiler_current_process_id()); + + static_assert( + std::is_same_v<decltype(profiler_current_thread_id()), ProfilerThreadId>); + static_assert( + std::is_same_v<decltype(profiler_current_thread_id()), + decltype(baseprofiler::profiler_current_thread_id())>); + EXPECT_EQ(profiler_current_thread_id(), + baseprofiler::profiler_current_thread_id()); + + ProfilerThreadId mainTestThreadId = profiler_current_thread_id(); + EXPECT_TRUE(mainTestThreadId.IsSpecified()); + + ProfilerThreadId mainThreadId = profiler_main_thread_id(); + EXPECT_TRUE(mainThreadId.IsSpecified()); + + EXPECT_EQ(mainThreadId, mainTestThreadId) + << "Test should run on the main thread"; + EXPECT_TRUE(profiler_is_main_thread()); + + std::thread testThread([&]() { + EXPECT_EQ(profiler_current_process_id(), processId); + + const ProfilerThreadId testThreadId = profiler_current_thread_id(); + EXPECT_TRUE(testThreadId.IsSpecified()); + EXPECT_NE(testThreadId, mainThreadId); + EXPECT_FALSE(profiler_is_main_thread()); + + EXPECT_EQ(baseprofiler::profiler_current_process_id(), processId); + EXPECT_EQ(baseprofiler::profiler_current_thread_id(), testThreadId); + EXPECT_EQ(baseprofiler::profiler_main_thread_id(), mainThreadId); + EXPECT_FALSE(baseprofiler::profiler_is_main_thread()); + }); + testThread.join(); +} + +TEST(GeckoProfiler, ThreadRegistrationInfo) +{ + profiler_init_main_thread_id(); + + TimeStamp ts = TimeStamp::Now(); + { + profiler::ThreadRegistrationInfo trInfo{ + "name", ProfilerThreadId::FromNumber(123), false, ts}; + EXPECT_STREQ(trInfo.Name(), "name"); + EXPECT_NE(trInfo.Name(), "name") + << "ThreadRegistrationInfo should keep its own copy of the name"; + EXPECT_EQ(trInfo.RegisterTime(), ts); + EXPECT_EQ(trInfo.ThreadId(), ProfilerThreadId::FromNumber(123)); + EXPECT_EQ(trInfo.IsMainThread(), false); + } + + // Make sure the next timestamp will be different from `ts`. + while (TimeStamp::Now() == ts) { + } + + { + profiler::ThreadRegistrationInfo trInfoHere{"Here"}; + EXPECT_STREQ(trInfoHere.Name(), "Here"); + EXPECT_NE(trInfoHere.Name(), "Here") + << "ThreadRegistrationInfo should keep its own copy of the name"; + TimeStamp baseRegistrationTime = + baseprofiler::detail::GetThreadRegistrationTime(); + if (baseRegistrationTime) { + EXPECT_EQ(trInfoHere.RegisterTime(), baseRegistrationTime); + } else { + EXPECT_GT(trInfoHere.RegisterTime(), ts); + } + EXPECT_EQ(trInfoHere.ThreadId(), profiler_current_thread_id()); + EXPECT_EQ(trInfoHere.ThreadId(), profiler_main_thread_id()) + << "Gtests are assumed to run on the main thread"; + EXPECT_EQ(trInfoHere.IsMainThread(), true) + << "Gtests are assumed to run on the main thread"; + } + + { + // Sub-thread test. + // These will receive sub-thread data (to test move at thread end). 
+ TimeStamp tsThread; + ProfilerThreadId threadThreadId; + UniquePtr<profiler::ThreadRegistrationInfo> trInfoThreadPtr; + + std::thread testThread([&]() { + profiler::ThreadRegistrationInfo trInfoThread{"Thread"}; + EXPECT_STREQ(trInfoThread.Name(), "Thread"); + EXPECT_NE(trInfoThread.Name(), "Thread") + << "ThreadRegistrationInfo should keep its own copy of the name"; + EXPECT_GT(trInfoThread.RegisterTime(), ts); + EXPECT_EQ(trInfoThread.ThreadId(), profiler_current_thread_id()); + EXPECT_NE(trInfoThread.ThreadId(), profiler_main_thread_id()); + EXPECT_EQ(trInfoThread.IsMainThread(), false); + + tsThread = trInfoThread.RegisterTime(); + threadThreadId = trInfoThread.ThreadId(); + trInfoThreadPtr = + MakeUnique<profiler::ThreadRegistrationInfo>(std::move(trInfoThread)); + }); + testThread.join(); + + ASSERT_NE(trInfoThreadPtr, nullptr); + EXPECT_STREQ(trInfoThreadPtr->Name(), "Thread"); + EXPECT_EQ(trInfoThreadPtr->RegisterTime(), tsThread); + EXPECT_EQ(trInfoThreadPtr->ThreadId(), threadThreadId); + EXPECT_EQ(trInfoThreadPtr->IsMainThread(), false) + << "Gtests are assumed to run on the main thread"; + } +} + +static constexpr ThreadProfilingFeatures scEachAndAnyThreadProfilingFeatures[] = + {ThreadProfilingFeatures::CPUUtilization, ThreadProfilingFeatures::Sampling, + ThreadProfilingFeatures::Markers, ThreadProfilingFeatures::Any}; + +TEST(GeckoProfiler, ThreadProfilingFeaturesType) +{ + ASSERT_EQ(static_cast<uint32_t>(ThreadProfilingFeatures::Any), 1u + 2u + 4u) + << "This test assumes that there are 3 binary choices 1+2+4; " + "Is this test up to date?"; + + EXPECT_EQ(Combine(ThreadProfilingFeatures::CPUUtilization, + ThreadProfilingFeatures::Sampling, + ThreadProfilingFeatures::Markers), + ThreadProfilingFeatures::Any); + + constexpr ThreadProfilingFeatures allThreadProfilingFeatures[] = { + ThreadProfilingFeatures::NotProfiled, + ThreadProfilingFeatures::CPUUtilization, + ThreadProfilingFeatures::Sampling, ThreadProfilingFeatures::Markers, + ThreadProfilingFeatures::Any}; + + for (ThreadProfilingFeatures f1 : allThreadProfilingFeatures) { + // Combine and Intersect are commutative. + for (ThreadProfilingFeatures f2 : allThreadProfilingFeatures) { + EXPECT_EQ(Combine(f1, f2), Combine(f2, f1)); + EXPECT_EQ(Intersect(f1, f2), Intersect(f2, f1)); + } + + // Combine works like OR. + EXPECT_EQ(Combine(f1, f1), f1); + EXPECT_EQ(Combine(f1, f1, f1), f1); + + // 'OR NotProfiled' doesn't change anything. + EXPECT_EQ(Combine(f1, ThreadProfilingFeatures::NotProfiled), f1); + + // 'OR Any' makes Any. + EXPECT_EQ(Combine(f1, ThreadProfilingFeatures::Any), + ThreadProfilingFeatures::Any); + + // Intersect works like AND. + EXPECT_EQ(Intersect(f1, f1), f1); + EXPECT_EQ(Intersect(f1, f1, f1), f1); + + // 'AND NotProfiled' erases anything. + EXPECT_EQ(Intersect(f1, ThreadProfilingFeatures::NotProfiled), + ThreadProfilingFeatures::NotProfiled); + + // 'AND Any' doesn't change anything. + EXPECT_EQ(Intersect(f1, ThreadProfilingFeatures::Any), f1); + } + + for (ThreadProfilingFeatures f1 : scEachAndAnyThreadProfilingFeatures) { + EXPECT_TRUE(DoFeaturesIntersect(f1, f1)); + + // NotProfiled doesn't intersect with any feature. + EXPECT_FALSE(DoFeaturesIntersect(f1, ThreadProfilingFeatures::NotProfiled)); + + // Any intersects with any feature. 
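// (Aside, not from the patch: the loop above verifies that Combine() and
// Intersect() behave like bitwise OR and AND over the feature bits, and the
// check just below closes the loop for DoFeaturesIntersect(). A minimal
// sketch of that enum-flag idiom, with hypothetical names:)
enum class FlagsSketch : uint32_t { None = 0, A = 1 << 0, B = 1 << 1 };
constexpr FlagsSketch CombineSketch(FlagsSketch aLhs, FlagsSketch aRhs) {
  // "Combine works like OR" over the underlying integer representation.
  return static_cast<FlagsSketch>(static_cast<uint32_t>(aLhs) |
                                  static_cast<uint32_t>(aRhs));
}
constexpr bool DoIntersectSketch(FlagsSketch aLhs, FlagsSketch aRhs) {
  // "Intersect works like AND"; intersection is non-empty if any bit is set.
  return (static_cast<uint32_t>(aLhs) & static_cast<uint32_t>(aRhs)) != 0;
}
static_assert(DoIntersectSketch(CombineSketch(FlagsSketch::A, FlagsSketch::B),
                                FlagsSketch::B));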
+ EXPECT_TRUE(DoFeaturesIntersect(f1, ThreadProfilingFeatures::Any)); + } +} + +static void TestConstUnlockedConstReader( + const profiler::ThreadRegistration::UnlockedConstReader& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + EXPECT_STREQ(aData.Info().Name(), "Test thread"); + EXPECT_GE(aData.Info().RegisterTime(), aBeforeRegistration); + EXPECT_LE(aData.Info().RegisterTime(), aAfterRegistration); + EXPECT_EQ(aData.Info().ThreadId(), aThreadId); + EXPECT_FALSE(aData.Info().IsMainThread()); + +#if (defined(_MSC_VER) || defined(__MINGW32__)) && defined(MOZ_GECKO_PROFILER) + HANDLE threadHandle = aData.PlatformDataCRef().ProfiledThread(); + EXPECT_NE(threadHandle, nullptr); + EXPECT_EQ(ProfilerThreadId::FromNumber(::GetThreadId(threadHandle)), + aThreadId); + // Test calling QueryThreadCycleTime, we cannot assume that it will always + // work, but at least it shouldn't crash. + ULONG64 cycles; + (void)QueryThreadCycleTime(threadHandle, &cycles); +#elif defined(__APPLE__) && defined(MOZ_GECKO_PROFILER) + // Test calling thread_info, we cannot assume that it will always work, but at + // least it shouldn't crash. + thread_basic_info_data_t threadBasicInfo; + mach_msg_type_number_t basicCount = THREAD_BASIC_INFO_COUNT; + (void)thread_info( + aData.PlatformDataCRef().ProfiledThread(), THREAD_BASIC_INFO, + reinterpret_cast<thread_info_t>(&threadBasicInfo), &basicCount); +#elif (defined(__linux__) || defined(__ANDROID__) || defined(__FreeBSD__)) && \ + defined(MOZ_GECKO_PROFILER) + // Test calling GetClockId, we cannot assume that it will always work, but at + // least it shouldn't crash. + Maybe<clockid_t> maybeClockId = aData.PlatformDataCRef().GetClockId(); + if (maybeClockId) { + // Test calling clock_gettime, we cannot assume that it will always work, + // but at least it shouldn't crash. 
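// (Aside, not from the patch: GetClockId() above presumably hands back a
// per-thread CPU clock id; POSIX exposes the same idea through
// pthread_getcpuclockid(). A self-contained sketch with a hypothetical
// helper name, following the same "may fail, must not crash" spirit:)
#include <pthread.h>
#include <time.h>
static bool GetThreadCpuTimeNsSketch(uint64_t* aOutNs) {
  clockid_t clockId;
  if (pthread_getcpuclockid(pthread_self(), &clockId) != 0) {
    return false;  // Per-thread CPU clocks are not supported everywhere.
  }
  timespec t;
  if (clock_gettime(clockId, &t) != 0) {
    return false;
  }
  *aOutNs = uint64_t(t.tv_sec) * 1000000000ull + uint64_t(t.tv_nsec);
  return true;
}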
+ timespec ts; + (void)clock_gettime(*maybeClockId, &ts); + } +#else + (void)aData.PlatformDataCRef(); +#endif + + EXPECT_GE(aData.StackTop(), aOnStackObject) + << "StackTop should be at &onStackChar, or higher on some " + "platforms"; +}; + +static void TestConstUnlockedConstReaderAndAtomicRW( + const profiler::ThreadRegistration::UnlockedConstReaderAndAtomicRW& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedConstReader(aData, aBeforeRegistration, aAfterRegistration, + aOnStackObject, aThreadId); + + (void)aData.ProfilingStackCRef(); + + EXPECT_EQ(aData.ProfilingFeatures(), ThreadProfilingFeatures::NotProfiled); + + EXPECT_FALSE(aData.IsSleeping()); +}; + +static void TestUnlockedConstReaderAndAtomicRW( + profiler::ThreadRegistration::UnlockedConstReaderAndAtomicRW& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedConstReaderAndAtomicRW(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + (void)aData.ProfilingStackRef(); + + EXPECT_FALSE(aData.IsSleeping()); + aData.SetSleeping(); + EXPECT_TRUE(aData.IsSleeping()); + aData.SetAwake(); + EXPECT_FALSE(aData.IsSleeping()); + + aData.ReinitializeOnResume(); + + EXPECT_FALSE(aData.CanDuplicateLastSampleDueToSleep()); + EXPECT_FALSE(aData.CanDuplicateLastSampleDueToSleep()); + aData.SetSleeping(); + // After sleeping, the 2nd+ calls can duplicate. + EXPECT_FALSE(aData.CanDuplicateLastSampleDueToSleep()); + EXPECT_TRUE(aData.CanDuplicateLastSampleDueToSleep()); + EXPECT_TRUE(aData.CanDuplicateLastSampleDueToSleep()); + aData.ReinitializeOnResume(); + // After reinit (and sleeping), the 2nd+ calls can duplicate. + EXPECT_FALSE(aData.CanDuplicateLastSampleDueToSleep()); + EXPECT_TRUE(aData.CanDuplicateLastSampleDueToSleep()); + EXPECT_TRUE(aData.CanDuplicateLastSampleDueToSleep()); + aData.SetAwake(); + EXPECT_FALSE(aData.CanDuplicateLastSampleDueToSleep()); + EXPECT_FALSE(aData.CanDuplicateLastSampleDueToSleep()); +}; + +static void TestConstUnlockedRWForLockedProfiler( + const profiler::ThreadRegistration::UnlockedRWForLockedProfiler& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedConstReaderAndAtomicRW(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + // We can't create a PSAutoLock here, so just verify that the call would + // compile and return the expected type. 
+ static_assert(std::is_same_v<decltype(aData.GetProfiledThreadData( + std::declval<PSAutoLock>())), + const ProfiledThreadData*>); +}; + +static void TestConstUnlockedReaderAndAtomicRWOnThread( + const profiler::ThreadRegistration::UnlockedReaderAndAtomicRWOnThread& + aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedRWForLockedProfiler(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + EXPECT_EQ(aData.GetJSContext(), nullptr); +}; + +static void TestUnlockedRWForLockedProfiler( + profiler::ThreadRegistration::UnlockedRWForLockedProfiler& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedRWForLockedProfiler(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + TestUnlockedConstReaderAndAtomicRW(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + // No functions to test here. +}; + +static void TestUnlockedReaderAndAtomicRWOnThread( + profiler::ThreadRegistration::UnlockedReaderAndAtomicRWOnThread& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedReaderAndAtomicRWOnThread(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + TestUnlockedRWForLockedProfiler(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + // No functions to test here. +}; + +static void TestConstLockedRWFromAnyThread( + const profiler::ThreadRegistration::LockedRWFromAnyThread& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstUnlockedReaderAndAtomicRWOnThread(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + EXPECT_EQ(aData.GetJsFrameBuffer(), nullptr); + EXPECT_EQ(aData.GetEventTarget(), nullptr); +}; + +static void TestLockedRWFromAnyThread( + profiler::ThreadRegistration::LockedRWFromAnyThread& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstLockedRWFromAnyThread(aData, aBeforeRegistration, aAfterRegistration, + aOnStackObject, aThreadId); + TestUnlockedReaderAndAtomicRWOnThread(aData, aBeforeRegistration, + aAfterRegistration, aOnStackObject, + aThreadId); + + // We can't create a ProfiledThreadData nor PSAutoLock here, so just verify + // that the call would compile and return the expected type. 
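// (Aside, not from the patch: the static_assert below, like the one above,
// uses std::declval<T>() to form a call expression in an unevaluated context,
// so a signature is type-checked without constructing any object. A generic
// illustration of the idiom, assuming <string>, <utility>, <type_traits>:)
static_assert(
    std::is_same_v<decltype(std::declval<const std::string&>().empty()), bool>,
    "the call is type-checked, but no std::string is ever created");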
+ static_assert(std::is_same_v<decltype(aData.SetProfilingFeaturesAndData( + std::declval<ThreadProfilingFeatures>(), + std::declval<ProfiledThreadData*>(), + std::declval<PSAutoLock>())), + void>); + + aData.ResetMainThread(nullptr); + + TimeDuration delay = TimeDuration::FromSeconds(1); + TimeDuration running = TimeDuration::FromSeconds(1); + aData.GetRunningEventDelay(TimeStamp::Now(), delay, running); + EXPECT_TRUE(delay.IsZero()); + EXPECT_TRUE(running.IsZero()); + + aData.StartJSSampling(123u); + aData.StopJSSampling(); +}; + +static void TestConstLockedRWOnThread( + const profiler::ThreadRegistration::LockedRWOnThread& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstLockedRWFromAnyThread(aData, aBeforeRegistration, aAfterRegistration, + aOnStackObject, aThreadId); + + // No functions to test here. +}; + +static void TestLockedRWOnThread( + profiler::ThreadRegistration::LockedRWOnThread& aData, + const TimeStamp& aBeforeRegistration, const TimeStamp& aAfterRegistration, + const void* aOnStackObject, + ProfilerThreadId aThreadId = profiler_current_thread_id()) { + TestConstLockedRWOnThread(aData, aBeforeRegistration, aAfterRegistration, + aOnStackObject, aThreadId); + TestLockedRWFromAnyThread(aData, aBeforeRegistration, aAfterRegistration, + aOnStackObject, aThreadId); + + // We don't want to really call SetJSContext here, so just verify that + // the call would compile and return the expected type. + static_assert( + std::is_same_v<decltype(aData.SetJSContext(std::declval<JSContext*>())), + void>); + aData.ClearJSContext(); + aData.PollJSSampling(); +}; + +TEST(GeckoProfiler, ThreadRegistration_DataAccess) +{ + using TR = profiler::ThreadRegistration; + + profiler_init_main_thread_id(); + ASSERT_TRUE(profiler_is_main_thread()) + << "This test assumes it runs on the main thread"; + + // Note that the main thread could already be registered, so we work in a new + // thread to test an actual registration that we control. + + std::thread testThread([&]() { + ASSERT_FALSE(TR::IsRegistered()) + << "A new std::thread should not start registered"; + EXPECT_FALSE(TR::GetOnThreadPtr()); + EXPECT_FALSE(TR::WithOnThreadRefOr([&](auto) { return true; }, false)); + + char onStackChar; + + TimeStamp beforeRegistration = TimeStamp::Now(); + TR tr{"Test thread", &onStackChar}; + TimeStamp afterRegistration = TimeStamp::Now(); + + ASSERT_TRUE(TR::IsRegistered()); + + // Note: This test will mostly be about checking the correct access to + // thread data, depending on how it's obtained. Not all the functionality + // related to that data is tested (e.g., because it involves JS or other + // external dependencies that would be difficult to control here.) + + auto TestOnThreadRef = [&](TR::OnThreadRef aOnThreadRef) { + // To test const-qualified member functions. 
+ const TR::OnThreadRef& onThreadCRef = aOnThreadRef; + + // const UnlockedConstReader (always const) + + TestConstUnlockedConstReader(onThreadCRef.UnlockedConstReaderCRef(), + beforeRegistration, afterRegistration, + &onStackChar); + onThreadCRef.WithUnlockedConstReader( + [&](const TR::UnlockedConstReader& aData) { + TestConstUnlockedConstReader(aData, beforeRegistration, + afterRegistration, &onStackChar); + }); + + // const UnlockedConstReaderAndAtomicRW + + TestConstUnlockedConstReaderAndAtomicRW( + onThreadCRef.UnlockedConstReaderAndAtomicRWCRef(), beforeRegistration, + afterRegistration, &onStackChar); + onThreadCRef.WithUnlockedConstReaderAndAtomicRW( + [&](const TR::UnlockedConstReaderAndAtomicRW& aData) { + TestConstUnlockedConstReaderAndAtomicRW( + aData, beforeRegistration, afterRegistration, &onStackChar); + }); + + // non-const UnlockedConstReaderAndAtomicRW + + TestUnlockedConstReaderAndAtomicRW( + aOnThreadRef.UnlockedConstReaderAndAtomicRWRef(), beforeRegistration, + afterRegistration, &onStackChar); + aOnThreadRef.WithUnlockedConstReaderAndAtomicRW( + [&](TR::UnlockedConstReaderAndAtomicRW& aData) { + TestUnlockedConstReaderAndAtomicRW(aData, beforeRegistration, + afterRegistration, &onStackChar); + }); + + // const UnlockedRWForLockedProfiler + + TestConstUnlockedRWForLockedProfiler( + onThreadCRef.UnlockedRWForLockedProfilerCRef(), beforeRegistration, + afterRegistration, &onStackChar); + onThreadCRef.WithUnlockedRWForLockedProfiler( + [&](const TR::UnlockedRWForLockedProfiler& aData) { + TestConstUnlockedRWForLockedProfiler( + aData, beforeRegistration, afterRegistration, &onStackChar); + }); + + // non-const UnlockedRWForLockedProfiler + + TestUnlockedRWForLockedProfiler( + aOnThreadRef.UnlockedRWForLockedProfilerRef(), beforeRegistration, + afterRegistration, &onStackChar); + aOnThreadRef.WithUnlockedRWForLockedProfiler( + [&](TR::UnlockedRWForLockedProfiler& aData) { + TestUnlockedRWForLockedProfiler(aData, beforeRegistration, + afterRegistration, &onStackChar); + }); + + // const UnlockedReaderAndAtomicRWOnThread + + TestConstUnlockedReaderAndAtomicRWOnThread( + onThreadCRef.UnlockedReaderAndAtomicRWOnThreadCRef(), + beforeRegistration, afterRegistration, &onStackChar); + onThreadCRef.WithUnlockedReaderAndAtomicRWOnThread( + [&](const TR::UnlockedReaderAndAtomicRWOnThread& aData) { + TestConstUnlockedReaderAndAtomicRWOnThread( + aData, beforeRegistration, afterRegistration, &onStackChar); + }); + + // non-const UnlockedReaderAndAtomicRWOnThread + + TestUnlockedReaderAndAtomicRWOnThread( + aOnThreadRef.UnlockedReaderAndAtomicRWOnThreadRef(), + beforeRegistration, afterRegistration, &onStackChar); + aOnThreadRef.WithUnlockedReaderAndAtomicRWOnThread( + [&](TR::UnlockedReaderAndAtomicRWOnThread& aData) { + TestUnlockedReaderAndAtomicRWOnThread( + aData, beforeRegistration, afterRegistration, &onStackChar); + }); + + // LockedRWFromAnyThread + // Note: It cannot directly be accessed on the thread, this will be + // tested through LockedRWOnThread. 
+ + // const LockedRWOnThread + + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + { + TR::OnThreadRef::ConstRWOnThreadWithLock constRWOnThreadWithLock = + onThreadCRef.ConstLockedRWOnThread(); + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + TestConstLockedRWOnThread(constRWOnThreadWithLock.DataCRef(), + beforeRegistration, afterRegistration, + &onStackChar); + } + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + onThreadCRef.WithConstLockedRWOnThread( + [&](const TR::LockedRWOnThread& aData) { + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + TestConstLockedRWOnThread(aData, beforeRegistration, + afterRegistration, &onStackChar); + }); + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + + // non-const LockedRWOnThread + + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + { + TR::OnThreadRef::RWOnThreadWithLock rwOnThreadWithLock = + aOnThreadRef.GetLockedRWOnThread(); + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + TestConstLockedRWOnThread(rwOnThreadWithLock.DataCRef(), + beforeRegistration, afterRegistration, + &onStackChar); + TestLockedRWOnThread(rwOnThreadWithLock.DataRef(), beforeRegistration, + afterRegistration, &onStackChar); + } + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + aOnThreadRef.WithLockedRWOnThread([&](TR::LockedRWOnThread& aData) { + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + TestLockedRWOnThread(aData, beforeRegistration, afterRegistration, + &onStackChar); + }); + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + }; + + TR::OnThreadPtr onThreadPtr = TR::GetOnThreadPtr(); + ASSERT_TRUE(onThreadPtr); + TestOnThreadRef(*onThreadPtr); + + TR::WithOnThreadRef( + [&](TR::OnThreadRef aOnThreadRef) { TestOnThreadRef(aOnThreadRef); }); + + EXPECT_TRUE(TR::WithOnThreadRefOr( + [&](TR::OnThreadRef aOnThreadRef) { + TestOnThreadRef(aOnThreadRef); + return true; + }, + false)); + }); + testThread.join(); +} + +// Thread name if registered, nullptr otherwise. +static const char* GetThreadName() { + return profiler::ThreadRegistration::WithOnThreadRefOr( + [](profiler::ThreadRegistration::OnThreadRef onThreadRef) { + return onThreadRef.WithUnlockedConstReader( + [](const profiler::ThreadRegistration::UnlockedConstReader& aData) { + return aData.Info().Name(); + }); + }, + nullptr); +} + +// Get the thread name, as registered in the PRThread, nullptr on failure. +static const char* GetPRThreadName() { + nsIThread* nsThread = NS_GetCurrentThread(); + if (!nsThread) { + return nullptr; + } + PRThread* prThread = nullptr; + if (NS_FAILED(nsThread->GetPRThread(&prThread))) { + return nullptr; + } + if (!prThread) { + return nullptr; + } + return PR_GetThreadName(prThread); +} + +TEST(GeckoProfiler, ThreadRegistration_MainThreadName) +{ + EXPECT_TRUE(profiler::ThreadRegistration::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "GeckoMain"); + + // Check that the real thread name (outside the profiler) is *not* GeckoMain. + EXPECT_STRNE(GetPRThreadName(), "GeckoMain"); +} + +TEST(GeckoProfiler, ThreadRegistration_NestedRegistrations) +{ + using TR = profiler::ThreadRegistration; + + profiler_init_main_thread_id(); + ASSERT_TRUE(profiler_is_main_thread()) + << "This test assumes it runs on the main thread"; + + // Note that the main thread could already be registered, so we work in a new + // thread to test actual registrations that we control. 
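// (Aside, not from the patch: the nesting tests below exercise "depth
// counted" registration semantics -- only the outermost registration actually
// registers and names the thread, inner ones are no-ops that keep the
// original name. A minimal sketch of that pattern, all names hypothetical
// and not the real ThreadRegistration internals:)
static thread_local int gRegistrationDepthSketch = 0;
static void RegisterSketch(const char* aName) {
  if (gRegistrationDepthSketch++ == 0) {
    // Outermost registration: actually register the thread as `aName`.
  }
  // Nested calls only bump the depth; the original name is kept.
}
static void UnregisterSketch() {
  if (gRegistrationDepthSketch > 0 && --gRegistrationDepthSketch == 0) {
    // Matching outermost un-registration: actually unregister.
  }
  // Extra calls while unregistered are ignored.
}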
+ + std::thread testThread([&]() { + ASSERT_FALSE(TR::IsRegistered()) + << "A new std::thread should not start registered"; + + char onStackChar; + + // Blocks {} are mostly for clarity, but some control on-stack registration + // lifetimes. + + // On-stack registration. + { + TR rt{"Test thread #1", &onStackChar}; + ASSERT_TRUE(TR::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "Test thread #1"); + EXPECT_STREQ(GetPRThreadName(), "Test thread #1"); + } + ASSERT_FALSE(TR::IsRegistered()); + + // Off-stack registration. + { + TR::RegisterThread("Test thread #2", &onStackChar); + ASSERT_TRUE(TR::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "Test thread #2"); + EXPECT_STREQ(GetPRThreadName(), "Test thread #2"); + + TR::UnregisterThread(); + ASSERT_FALSE(TR::IsRegistered()); + } + + // Extra un-registration should be ignored. + TR::UnregisterThread(); + ASSERT_FALSE(TR::IsRegistered()); + + // Nested on-stack. + { + TR rt2{"Test thread #3", &onStackChar}; + ASSERT_TRUE(TR::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "Test thread #3"); + EXPECT_STREQ(GetPRThreadName(), "Test thread #3"); + + { + TR rt3{"Test thread #4", &onStackChar}; + ASSERT_TRUE(TR::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "Test thread #3") + << "Nested registration shouldn't change the name"; + EXPECT_STREQ(GetPRThreadName(), "Test thread #3") + << "Nested registration shouldn't change the PRThread name"; + } + ASSERT_TRUE(TR::IsRegistered()) + << "Thread should still be registered after nested un-registration"; + EXPECT_STREQ(GetThreadName(), "Test thread #3") + << "Thread should still be registered after nested un-registration"; + EXPECT_STREQ(GetPRThreadName(), "Test thread #3"); + } + ASSERT_FALSE(TR::IsRegistered()); + + // Nested off-stack. + { + TR::RegisterThread("Test thread #5", &onStackChar); + ASSERT_TRUE(TR::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "Test thread #5"); + EXPECT_STREQ(GetPRThreadName(), "Test thread #5"); + + { + TR::RegisterThread("Test thread #6", &onStackChar); + ASSERT_TRUE(TR::IsRegistered()); + EXPECT_STREQ(GetThreadName(), "Test thread #5") + << "Nested registration shouldn't change the name"; + EXPECT_STREQ(GetPRThreadName(), "Test thread #5") + << "Nested registration shouldn't change the PRThread name"; + + TR::UnregisterThread(); + ASSERT_TRUE(TR::IsRegistered()) + << "Thread should still be registered after nested un-registration"; + EXPECT_STREQ(GetThreadName(), "Test thread #5") + << "Thread should still be registered after nested un-registration"; + EXPECT_STREQ(GetPRThreadName(), "Test thread #5"); + } + + TR::UnregisterThread(); + ASSERT_FALSE(TR::IsRegistered()); + } + + // Nested on- and off-stack. 
+    {
+      TR rt2{"Test thread #7", &onStackChar};
+      ASSERT_TRUE(TR::IsRegistered());
+      EXPECT_STREQ(GetThreadName(), "Test thread #7");
+      EXPECT_STREQ(GetPRThreadName(), "Test thread #7");
+
+      {
+        TR::RegisterThread("Test thread #8", &onStackChar);
+        ASSERT_TRUE(TR::IsRegistered());
+        EXPECT_STREQ(GetThreadName(), "Test thread #7")
+            << "Nested registration shouldn't change the name";
+        EXPECT_STREQ(GetPRThreadName(), "Test thread #7")
+            << "Nested registration shouldn't change the PRThread name";
+
+        TR::UnregisterThread();
+        ASSERT_TRUE(TR::IsRegistered())
+            << "Thread should still be registered after nested un-registration";
+        EXPECT_STREQ(GetThreadName(), "Test thread #7")
+            << "Thread should still be registered after nested un-registration";
+        EXPECT_STREQ(GetPRThreadName(), "Test thread #7");
+      }
+    }
+    ASSERT_FALSE(TR::IsRegistered());
+
+    // Nested off- and on-stack.
+    {
+      TR::RegisterThread("Test thread #9", &onStackChar);
+      ASSERT_TRUE(TR::IsRegistered());
+      EXPECT_STREQ(GetThreadName(), "Test thread #9");
+      EXPECT_STREQ(GetPRThreadName(), "Test thread #9");
+
+      {
+        TR rt3{"Test thread #10", &onStackChar};
+        ASSERT_TRUE(TR::IsRegistered());
+        EXPECT_STREQ(GetThreadName(), "Test thread #9")
+            << "Nested registration shouldn't change the name";
+        EXPECT_STREQ(GetPRThreadName(), "Test thread #9")
+            << "Nested registration shouldn't change the PRThread name";
+      }
+      ASSERT_TRUE(TR::IsRegistered())
+          << "Thread should still be registered after nested un-registration";
+      EXPECT_STREQ(GetThreadName(), "Test thread #9")
+          << "Thread should still be registered after nested un-registration";
+      EXPECT_STREQ(GetPRThreadName(), "Test thread #9");
+
+      TR::UnregisterThread();
+      ASSERT_FALSE(TR::IsRegistered());
+    }
+
+    // Excess UnregisterThread with on-stack TR.
+    {
+      TR rt2{"Test thread #11", &onStackChar};
+      ASSERT_TRUE(TR::IsRegistered());
+      EXPECT_STREQ(GetThreadName(), "Test thread #11");
+      EXPECT_STREQ(GetPRThreadName(), "Test thread #11");
+
+      TR::UnregisterThread();
+      ASSERT_TRUE(TR::IsRegistered())
+          << "On-stack thread should still be registered after off-stack "
+             "un-registration";
+      EXPECT_STREQ(GetThreadName(), "Test thread #11")
+          << "On-stack thread should still be registered after off-stack "
+             "un-registration";
+      EXPECT_STREQ(GetPRThreadName(), "Test thread #11");
+    }
+    ASSERT_FALSE(TR::IsRegistered());
+
+    // Excess on-thread TR destruction with already-unregistered root off-thread
+    // registration.
+    {
+      TR::RegisterThread("Test thread #12", &onStackChar);
+      ASSERT_TRUE(TR::IsRegistered());
+      EXPECT_STREQ(GetThreadName(), "Test thread #12");
+      EXPECT_STREQ(GetPRThreadName(), "Test thread #12");
+
+      {
+        TR rt3{"Test thread #13", &onStackChar};
+        ASSERT_TRUE(TR::IsRegistered());
+        EXPECT_STREQ(GetThreadName(), "Test thread #12")
+            << "Nested registration shouldn't change the name";
+        EXPECT_STREQ(GetPRThreadName(), "Test thread #12")
+            << "Nested registration shouldn't change the PRThread name";
+
+        // Note that we unregister the root registration, while nested `rt3` is
+        // still alive.
+        TR::UnregisterThread();
+        ASSERT_FALSE(TR::IsRegistered())
+            << "UnregisterThread() of the root RegisterThread() should always work";
+
+        // At the end of this block, `rt3` will be destroyed, but nothing
+        // should happen.
+ } + ASSERT_FALSE(TR::IsRegistered()); + } + + ASSERT_FALSE(TR::IsRegistered()); + }); + testThread.join(); +} + +TEST(GeckoProfiler, ThreadRegistry_DataAccess) +{ + using TR = profiler::ThreadRegistration; + using TRy = profiler::ThreadRegistry; + + profiler_init_main_thread_id(); + ASSERT_TRUE(profiler_is_main_thread()) + << "This test assumes it runs on the main thread"; + + // Note that the main thread could already be registered, so we work in a new + // thread to test an actual registration that we control. + + std::thread testThread([&]() { + ASSERT_FALSE(TR::IsRegistered()) + << "A new std::thread should not start registered"; + EXPECT_FALSE(TR::GetOnThreadPtr()); + EXPECT_FALSE(TR::WithOnThreadRefOr([&](auto) { return true; }, false)); + + char onStackChar; + + TimeStamp beforeRegistration = TimeStamp::Now(); + TR tr{"Test thread", &onStackChar}; + TimeStamp afterRegistration = TimeStamp::Now(); + + ASSERT_TRUE(TR::IsRegistered()); + + // Note: This test will mostly be about checking the correct access to + // thread data, depending on how it's obtained. Not all the functionality + // related to that data is tested (e.g., because it involves JS or other + // external dependencies that would be difficult to control here.) + + const ProfilerThreadId testThreadId = profiler_current_thread_id(); + + auto testThroughRegistry = [&]() { + auto TestOffThreadRef = [&](TRy::OffThreadRef aOffThreadRef) { + // To test const-qualified member functions. + const TRy::OffThreadRef& offThreadCRef = aOffThreadRef; + + // const UnlockedConstReader (always const) + + TestConstUnlockedConstReader(offThreadCRef.UnlockedConstReaderCRef(), + beforeRegistration, afterRegistration, + &onStackChar, testThreadId); + offThreadCRef.WithUnlockedConstReader( + [&](const TR::UnlockedConstReader& aData) { + TestConstUnlockedConstReader(aData, beforeRegistration, + afterRegistration, &onStackChar, + testThreadId); + }); + + // const UnlockedConstReaderAndAtomicRW + + TestConstUnlockedConstReaderAndAtomicRW( + offThreadCRef.UnlockedConstReaderAndAtomicRWCRef(), + beforeRegistration, afterRegistration, &onStackChar, testThreadId); + offThreadCRef.WithUnlockedConstReaderAndAtomicRW( + [&](const TR::UnlockedConstReaderAndAtomicRW& aData) { + TestConstUnlockedConstReaderAndAtomicRW( + aData, beforeRegistration, afterRegistration, &onStackChar, + testThreadId); + }); + + // non-const UnlockedConstReaderAndAtomicRW + + TestUnlockedConstReaderAndAtomicRW( + aOffThreadRef.UnlockedConstReaderAndAtomicRWRef(), + beforeRegistration, afterRegistration, &onStackChar, testThreadId); + aOffThreadRef.WithUnlockedConstReaderAndAtomicRW( + [&](TR::UnlockedConstReaderAndAtomicRW& aData) { + TestUnlockedConstReaderAndAtomicRW(aData, beforeRegistration, + afterRegistration, + &onStackChar, testThreadId); + }); + + // const UnlockedRWForLockedProfiler + + TestConstUnlockedRWForLockedProfiler( + offThreadCRef.UnlockedRWForLockedProfilerCRef(), beforeRegistration, + afterRegistration, &onStackChar, testThreadId); + offThreadCRef.WithUnlockedRWForLockedProfiler( + [&](const TR::UnlockedRWForLockedProfiler& aData) { + TestConstUnlockedRWForLockedProfiler(aData, beforeRegistration, + afterRegistration, + &onStackChar, testThreadId); + }); + + // non-const UnlockedRWForLockedProfiler + + TestUnlockedRWForLockedProfiler( + aOffThreadRef.UnlockedRWForLockedProfilerRef(), beforeRegistration, + afterRegistration, &onStackChar, testThreadId); + aOffThreadRef.WithUnlockedRWForLockedProfiler( + [&](TR::UnlockedRWForLockedProfiler& aData) { + 
TestUnlockedRWForLockedProfiler(aData, beforeRegistration, + afterRegistration, &onStackChar, + testThreadId); + }); + + // UnlockedReaderAndAtomicRWOnThread + // Note: It cannot directly be accessed off the thread, this will be + // tested through LockedRWFromAnyThread. + + // const LockedRWFromAnyThread + + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + { + TRy::OffThreadRef::ConstRWFromAnyThreadWithLock + constRWFromAnyThreadWithLock = + offThreadCRef.ConstLockedRWFromAnyThread(); + if (profiler_current_thread_id() == testThreadId) { + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + } + TestConstLockedRWFromAnyThread( + constRWFromAnyThreadWithLock.DataCRef(), beforeRegistration, + afterRegistration, &onStackChar, testThreadId); + } + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + offThreadCRef.WithConstLockedRWFromAnyThread( + [&](const TR::LockedRWFromAnyThread& aData) { + if (profiler_current_thread_id() == testThreadId) { + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + } + TestConstLockedRWFromAnyThread(aData, beforeRegistration, + afterRegistration, &onStackChar, + testThreadId); + }); + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + + // non-const LockedRWFromAnyThread + + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + { + TRy::OffThreadRef::RWFromAnyThreadWithLock rwFromAnyThreadWithLock = + aOffThreadRef.GetLockedRWFromAnyThread(); + if (profiler_current_thread_id() == testThreadId) { + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + } + TestLockedRWFromAnyThread(rwFromAnyThreadWithLock.DataRef(), + beforeRegistration, afterRegistration, + &onStackChar, testThreadId); + } + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + aOffThreadRef.WithLockedRWFromAnyThread( + [&](TR::LockedRWFromAnyThread& aData) { + if (profiler_current_thread_id() == testThreadId) { + EXPECT_TRUE(TR::IsDataMutexLockedOnCurrentThread()); + } + TestLockedRWFromAnyThread(aData, beforeRegistration, + afterRegistration, &onStackChar, + testThreadId); + }); + EXPECT_FALSE(TR::IsDataMutexLockedOnCurrentThread()); + + // LockedRWOnThread + // Note: It can never be accessed off the thread. + }; + + int ranTest = 0; + TRy::WithOffThreadRef(testThreadId, [&](TRy::OffThreadRef aOffThreadRef) { + TestOffThreadRef(aOffThreadRef); + ++ranTest; + }); + EXPECT_EQ(ranTest, 1); + + EXPECT_TRUE(TRy::WithOffThreadRefOr( + testThreadId, + [&](TRy::OffThreadRef aOffThreadRef) { + TestOffThreadRef(aOffThreadRef); + return true; + }, + false)); + + ranTest = 0; + EXPECT_FALSE(TRy::IsRegistryMutexLockedOnCurrentThread()); + for (TRy::OffThreadRef offThreadRef : TRy::LockedRegistry{}) { + EXPECT_TRUE(TRy::IsRegistryMutexLockedOnCurrentThread() || + !TR::IsRegistered()); + if (offThreadRef.UnlockedConstReaderCRef().Info().ThreadId() == + testThreadId) { + TestOffThreadRef(offThreadRef); + ++ranTest; + } + } + EXPECT_EQ(ranTest, 1); + EXPECT_FALSE(TRy::IsRegistryMutexLockedOnCurrentThread()); + + { + ranTest = 0; + EXPECT_FALSE(TRy::IsRegistryMutexLockedOnCurrentThread()); + TRy::LockedRegistry lockedRegistry{}; + EXPECT_TRUE(TRy::IsRegistryMutexLockedOnCurrentThread() || + !TR::IsRegistered()); + for (TRy::OffThreadRef offThreadRef : lockedRegistry) { + if (offThreadRef.UnlockedConstReaderCRef().Info().ThreadId() == + testThreadId) { + TestOffThreadRef(offThreadRef); + ++ranTest; + } + } + EXPECT_EQ(ranTest, 1); + } + EXPECT_FALSE(TRy::IsRegistryMutexLockedOnCurrentThread()); + }; + + // Test on the current thread. 
+    testThroughRegistry();
+
+    // Test from another thread.
+    std::thread otherThread([&]() {
+      ASSERT_NE(profiler_current_thread_id(), testThreadId);
+      testThroughRegistry();
+
+      // Test that this unregistered thread is really not registered.
+      int ranTest = 0;
+      TRy::WithOffThreadRef(
+          profiler_current_thread_id(),
+          [&](TRy::OffThreadRef aOffThreadRef) { ++ranTest; });
+      EXPECT_EQ(ranTest, 0);
+
+      EXPECT_FALSE(TRy::WithOffThreadRefOr(
+          profiler_current_thread_id(),
+          [&](TRy::OffThreadRef aOffThreadRef) {
+            ++ranTest;
+            return true;
+          },
+          false));
+      EXPECT_EQ(ranTest, 0);
+    });
+    otherThread.join();
+  });
+  testThread.join();
+}
+
+TEST(GeckoProfiler, ThreadRegistration_RegistrationEdgeCases)
+{
+  using TR = profiler::ThreadRegistration;
+  using TRy = profiler::ThreadRegistry;
+
+  profiler_init_main_thread_id();
+  ASSERT_TRUE(profiler_is_main_thread())
+      << "This test assumes it runs on the main thread";
+
+  // Note that the main thread could already be registered, so we work in a new
+  // thread to test an actual registration that we control.
+
+  int registrationCount = 0;
+  int otherThreadLoops = 0;
+  int otherThreadReads = 0;
+
+  // This thread will register and unregister in a loop, with some pauses.
+  // Another thread will attempt to access the test thread, and lock its data.
+  // The main goal is to check edge cases around (un)registrations.
+  std::thread testThread([&]() {
+    const ProfilerThreadId testThreadId = profiler_current_thread_id();
+
+    const TimeStamp endTestAt = TimeStamp::Now() + TimeDuration::FromSeconds(1);
+
+    std::thread otherThread([&]() {
+      // Initial sleep so that testThread can start its loop.
+      PR_Sleep(PR_MillisecondsToInterval(1));
+
+      while (TimeStamp::Now() < endTestAt) {
+        ++otherThreadLoops;
+
+        TRy::WithOffThreadRef(testThreadId, [&](TRy::OffThreadRef
+                                                    aOffThreadRef) {
+          if (otherThreadLoops % 1000 == 0) {
+            PR_Sleep(PR_MillisecondsToInterval(1));
+          }
+          TRy::OffThreadRef::RWFromAnyThreadWithLock rwFromAnyThreadWithLock =
+              aOffThreadRef.GetLockedRWFromAnyThread();
+          ++otherThreadReads;
+          if (otherThreadReads % 1000 == 0) {
+            PR_Sleep(PR_MillisecondsToInterval(1));
+          }
+        });
+      }
+    });
+
+    while (TimeStamp::Now() < endTestAt) {
+      ASSERT_FALSE(TR::IsRegistered())
+          << "A new std::thread should not start registered";
+      EXPECT_FALSE(TR::GetOnThreadPtr());
+      EXPECT_FALSE(TR::WithOnThreadRefOr([&](auto) { return true; }, false));
+
+      char onStackChar;
+
+      TR tr{"Test thread", &onStackChar};
+      ++registrationCount;
+
+      ASSERT_TRUE(TR::IsRegistered());
+
+      int ranTest = 0;
+      TRy::WithOffThreadRef(testThreadId, [&](TRy::OffThreadRef aOffThreadRef) {
+        if (registrationCount % 2000 == 0) {
+          PR_Sleep(PR_MillisecondsToInterval(1));
+        }
+        ++ranTest;
+      });
+      EXPECT_EQ(ranTest, 1);
+
+      if (registrationCount % 1000 == 0) {
+        PR_Sleep(PR_MillisecondsToInterval(1));
+      }
+    }
+
+    otherThread.join();
+  });
+
+  testThread.join();
+
+  // It's difficult to guess what these numbers should be, but they definitely
+  // should be non-zero. The main goal was to test that nothing goes wrong.
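// (Aside, not from the patch: the test above is a time-boxed stress test, so
// the only safe assertions -- made just below -- are that each loop ran at
// all; exact counts are machine-dependent. The general shape of such a loop,
// assuming <functional> for std::function, with hypothetical names:)
static int TimeBoxedLoopSketch(TimeDuration aBudget,
                               const std::function<void()>& aBody) {
  int iterations = 0;
  const TimeStamp deadline = TimeStamp::Now() + aBudget;
  while (TimeStamp::Now() < deadline) {
    aBody();
    ++iterations;
  }
  return iterations;  // Callers can only meaningfully EXPECT_GT(..., 0).
}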
+  EXPECT_GT(registrationCount, 0);
+  EXPECT_GT(otherThreadLoops, 0);
+  EXPECT_GT(otherThreadReads, 0);
+}
+
+#ifdef MOZ_GECKO_PROFILER
+
+TEST(BaseProfiler, BlocksRingBuffer)
+{
+  constexpr uint32_t MBSize = 256;
+  uint8_t buffer[MBSize * 3];
+  for (size_t i = 0; i < MBSize * 3; ++i) {
+    buffer[i] = uint8_t('A' + i);
+  }
+  BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
+                      &buffer[MBSize], MakePowerOfTwo32<MBSize>());
+
+  {
+    nsCString cs("nsCString"_ns);
+    nsString s(u"nsString"_ns);
+    nsAutoCString acs("nsAutoCString"_ns);
+    nsAutoString as(u"nsAutoString"_ns);
+    nsAutoCStringN<8> acs8("nsAutoCStringN"_ns);
+    nsAutoStringN<8> as8(u"nsAutoStringN"_ns);
+    JS::UniqueChars jsuc = JS_smprintf("%s", "JS::UniqueChars");
+
+    rb.PutObjects(cs, s, acs, as, acs8, as8, jsuc);
+  }
+
+  rb.ReadEach([](ProfileBufferEntryReader& aER) {
+    ASSERT_EQ(aER.ReadObject<nsCString>(), "nsCString"_ns);
+    ASSERT_EQ(aER.ReadObject<nsString>(), u"nsString"_ns);
+    ASSERT_EQ(aER.ReadObject<nsAutoCString>(), "nsAutoCString"_ns);
+    ASSERT_EQ(aER.ReadObject<nsAutoString>(), u"nsAutoString"_ns);
+    ASSERT_EQ(aER.ReadObject<nsAutoCStringN<8>>(), "nsAutoCStringN"_ns);
+    ASSERT_EQ(aER.ReadObject<nsAutoStringN<8>>(), u"nsAutoStringN"_ns);
+    auto jsuc2 = aER.ReadObject<JS::UniqueChars>();
+    ASSERT_TRUE(!!jsuc2);
+    ASSERT_TRUE(strcmp(jsuc2.get(), "JS::UniqueChars") == 0);
+  });
+
+  // Everything around the sub-buffer should be unchanged.
+  for (size_t i = 0; i < MBSize; ++i) {
+    ASSERT_EQ(buffer[i], uint8_t('A' + i));
+  }
+  for (size_t i = MBSize * 2; i < MBSize * 3; ++i) {
+    ASSERT_EQ(buffer[i], uint8_t('A' + i));
+  }
+}
+
+// Common JSON checks.
+
+// Check that the given JSON string includes no JSON whitespace characters
+// (excluding those in property names and strings).
+void JSONWhitespaceCheck(const char* aOutput) {
+  ASSERT_NE(aOutput, nullptr);
+
+  enum class State { Data, String, StringEscaped };
+  State state = State::Data;
+  size_t length = 0;
+  size_t whitespaces = 0;
+  for (const char* p = aOutput; *p != '\0'; ++p) {
+    ++length;
+    const char c = *p;
+
+    switch (state) {
+      case State::Data:
+        if (c == '\n' || c == '\r' || c == ' ' || c == '\t') {
+          ++whitespaces;
+        } else if (c == '"') {
+          state = State::String;
+        }
+        break;
+
+      case State::String:
+        if (c == '"') {
+          state = State::Data;
+        } else if (c == '\\') {
+          state = State::StringEscaped;
+        }
+        break;
+
+      case State::StringEscaped:
+        state = State::String;
+        break;
+    }
+  }
+
+  EXPECT_EQ(whitespaces, 0u);
+  EXPECT_GT(length, 0u);
+}
+
+// Does the GETTER return a non-null TYPE? (Non-critical)
+#  define EXPECT_HAS_JSON(GETTER, TYPE)              \
+    do {                                             \
+      if ((GETTER).isNull()) {                       \
+        EXPECT_FALSE((GETTER).isNull())              \
+            << #GETTER " doesn't exist or is null";  \
+      } else if (!(GETTER).is##TYPE()) {             \
+        EXPECT_TRUE((GETTER).is##TYPE())             \
+            << #GETTER " didn't return type " #TYPE; \
+      }                                              \
+    } while (false)
+
+// Does the GETTER return a non-null TYPE? (Critical)
+#  define ASSERT_HAS_JSON(GETTER, TYPE) \
+    do {                                \
+      ASSERT_FALSE((GETTER).isNull());  \
+      ASSERT_TRUE((GETTER).is##TYPE()); \
+    } while (false)
+
+// Does the GETTER return a non-null TYPE? (Critical)
+// If yes, store the reference to Json::Value into VARIABLE.
+#  define GET_JSON(VARIABLE, GETTER, TYPE) \
+    ASSERT_HAS_JSON(GETTER, TYPE);         \
+    const Json::Value& VARIABLE = (GETTER)
+
+// Does the GETTER return a non-null TYPE? (Critical)
+// If yes, store the value as `const TYPE` into VARIABLE.
+# define GET_JSON_VALUE(VARIABLE, GETTER, TYPE) \ + ASSERT_HAS_JSON(GETTER, TYPE); \ + const auto VARIABLE = (GETTER).as##TYPE() + +// Non-const GET_JSON_VALUE. +# define GET_JSON_MUTABLE_VALUE(VARIABLE, GETTER, TYPE) \ + ASSERT_HAS_JSON(GETTER, TYPE); \ + auto VARIABLE = (GETTER).as##TYPE() + +// Checks that the GETTER's value is present, is of the expected TYPE, and has +// the expected VALUE. (Non-critical) +# define EXPECT_EQ_JSON(GETTER, TYPE, VALUE) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) \ + << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).is##TYPE()) { \ + EXPECT_TRUE((GETTER).is##TYPE()) \ + << #GETTER " didn't return type " #TYPE; \ + } else { \ + EXPECT_EQ((GETTER).as##TYPE(), (VALUE)); \ + } \ + } while (false) + +// Checks that the GETTER's value is present, and is a valid index into the +// STRINGTABLE array, pointing at the expected STRING. +# define EXPECT_EQ_STRINGTABLE(GETTER, STRINGTABLE, STRING) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) \ + << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).isUInt()) { \ + EXPECT_TRUE((GETTER).isUInt()) << #GETTER " didn't return an index"; \ + } else { \ + EXPECT_LT((GETTER).asUInt(), (STRINGTABLE).size()); \ + EXPECT_EQ_JSON((STRINGTABLE)[(GETTER).asUInt()], String, (STRING)); \ + } \ + } while (false) + +# define EXPECT_JSON_ARRAY_CONTAINS(GETTER, TYPE, VALUE) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) \ + << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).isArray()) { \ + EXPECT_TRUE((GETTER).is##TYPE()) << #GETTER " is not an array"; \ + } else if (const Json::ArrayIndex size = (GETTER).size(); size == 0u) { \ + EXPECT_NE(size, 0u) << #GETTER " is an empty array"; \ + } else { \ + bool found = false; \ + for (Json::ArrayIndex i = 0; i < size; ++i) { \ + if (!(GETTER)[i].is##TYPE()) { \ + EXPECT_TRUE((GETTER)[i].is##TYPE()) \ + << #GETTER "[" << i << "] is not " #TYPE; \ + break; \ + } \ + if ((GETTER)[i].as##TYPE() == (VALUE)) { \ + found = true; \ + break; \ + } \ + } \ + EXPECT_TRUE(found) << #GETTER " doesn't contain " #VALUE; \ + } \ + } while (false) + +# define EXPECT_JSON_ARRAY_EXCLUDES(GETTER, TYPE, VALUE) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) \ + << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).isArray()) { \ + EXPECT_TRUE((GETTER).is##TYPE()) << #GETTER " is not an array"; \ + } else { \ + const Json::ArrayIndex size = (GETTER).size(); \ + for (Json::ArrayIndex i = 0; i < size; ++i) { \ + if (!(GETTER)[i].is##TYPE()) { \ + EXPECT_TRUE((GETTER)[i].is##TYPE()) \ + << #GETTER "[" << i << "] is not " #TYPE; \ + break; \ + } \ + if ((GETTER)[i].as##TYPE() == (VALUE)) { \ + EXPECT_TRUE((GETTER)[i].as##TYPE() != (VALUE)) \ + << #GETTER " contains " #VALUE; \ + break; \ + } \ + } \ + } \ + } while (false) + +// Check that the given process root contains all the expected properties. 
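// (Aside, not from the patch: all the checking macros above wrap their
// statements in the classic `do { ... } while (false)` so that a
// multi-statement macro behaves as a single statement, e.g. under an
// un-braced if/else. A minimal illustration, hypothetical macro name:)
#define CHECK_NONNEGATIVE_SKETCH(x) \
  do {                              \
    if ((x) < 0) {                  \
      EXPECT_GE((x), 0);            \
    }                               \
  } while (false)
// (End of aside; the process-root checker announced above follows.)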
+static void JSONRootCheck(const Json::Value& aRoot,
+                          bool aWithMainThread = true) {
+  ASSERT_TRUE(aRoot.isObject());
+
+  EXPECT_HAS_JSON(aRoot["libs"], Array);
+
+  GET_JSON(meta, aRoot["meta"], Object);
+  EXPECT_HAS_JSON(meta["version"], UInt);
+  EXPECT_HAS_JSON(meta["startTime"], Double);
+  EXPECT_HAS_JSON(meta["profilingStartTime"], Double);
+  EXPECT_HAS_JSON(meta["contentEarliestTime"], Double);
+  EXPECT_HAS_JSON(meta["profilingEndTime"], Double);
+
+  EXPECT_HAS_JSON(aRoot["pages"], Array);
+
+  EXPECT_HAS_JSON(aRoot["profilerOverhead"], Object);
+
+  // "counters" is only present if there is any data to report.
+  // Tests that expect "counters" should test for its presence first.
+  if (aRoot.isMember("counters")) {
+    // We have "counters", test their overall validity.
+    GET_JSON(counters, aRoot["counters"], Array);
+    for (const Json::Value& counter : counters) {
+      ASSERT_TRUE(counter.isObject());
+      EXPECT_HAS_JSON(counter["name"], String);
+      EXPECT_HAS_JSON(counter["category"], String);
+      EXPECT_HAS_JSON(counter["description"], String);
+      GET_JSON(sampleGroups, counter["sample_groups"], Array);
+      for (const Json::Value& sampleGroup : sampleGroups) {
+        ASSERT_TRUE(sampleGroup.isObject());
+        EXPECT_HAS_JSON(sampleGroup["id"], UInt);
+
+        GET_JSON(samples, sampleGroup["samples"], Object);
+        GET_JSON(samplesSchema, samples["schema"], Object);
+        EXPECT_GE(samplesSchema.size(), 3u);
+        GET_JSON_VALUE(samplesTime, samplesSchema["time"], UInt);
+        GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
+        GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
+        GET_JSON(samplesData, samples["data"], Array);
+        double previousTime = 0.0;
+        for (const Json::Value& sample : samplesData) {
+          ASSERT_TRUE(sample.isArray());
+          GET_JSON_VALUE(time, sample[samplesTime], Double);
+          EXPECT_GE(time, previousTime);
+          previousTime = time;
+          if (sample.isValidIndex(samplesNumber)) {
+            EXPECT_HAS_JSON(sample[samplesNumber], UInt64);
+          }
+          if (sample.isValidIndex(samplesCount)) {
+            EXPECT_HAS_JSON(sample[samplesCount], Int64);
+          }
+        }
+      }
+    }
+  }
+
+  GET_JSON(threads, aRoot["threads"], Array);
+  const Json::ArrayIndex threadCount = threads.size();
+  for (Json::ArrayIndex i = 0; i < threadCount; ++i) {
+    GET_JSON(thread, threads[i], Object);
+    EXPECT_HAS_JSON(thread["processType"], String);
+    EXPECT_HAS_JSON(thread["name"], String);
+    EXPECT_HAS_JSON(thread["registerTime"], Double);
+    GET_JSON(samples, thread["samples"], Object);
+    EXPECT_HAS_JSON(thread["markers"], Object);
+    EXPECT_HAS_JSON(thread["pid"], Int64);
+    EXPECT_HAS_JSON(thread["tid"], Int64);
+    GET_JSON(stackTable, thread["stackTable"], Object);
+    GET_JSON(frameTable, thread["frameTable"], Object);
+    GET_JSON(stringTable, thread["stringTable"], Array);
+
+    GET_JSON(stackTableSchema, stackTable["schema"], Object);
+    EXPECT_GE(stackTableSchema.size(), 2u);
+    GET_JSON_VALUE(stackTablePrefix, stackTableSchema["prefix"], UInt);
+    GET_JSON_VALUE(stackTableFrame, stackTableSchema["frame"], UInt);
+    GET_JSON(stackTableData, stackTable["data"], Array);
+
+    GET_JSON(frameTableSchema, frameTable["schema"], Object);
+    EXPECT_GE(frameTableSchema.size(), 1u);
+    GET_JSON_VALUE(frameTableLocation, frameTableSchema["location"], UInt);
+    GET_JSON(frameTableData, frameTable["data"], Array);
+
+    GET_JSON(samplesSchema, samples["schema"], Object);
+    GET_JSON_VALUE(sampleStackIndex, samplesSchema["stack"], UInt);
+    GET_JSON(samplesData, samples["data"], Array);
+    for (const Json::Value& sample : samplesData) {
+      ASSERT_TRUE(sample.isArray());
+      if
(sample.isValidIndex(sampleStackIndex)) { + if (!sample[sampleStackIndex].isNull()) { + GET_JSON_MUTABLE_VALUE(stack, sample[sampleStackIndex], UInt); + EXPECT_TRUE(stackTableData.isValidIndex(stack)); + for (;;) { + // `stack` (from the sample, or from the callee frame's "prefix" in + // the previous loop) points into the stackTable. + GET_JSON(stackTableEntry, stackTableData[stack], Array); + GET_JSON_VALUE(frame, stackTableEntry[stackTableFrame], UInt); + + // The stackTable entry's "frame" points into the frameTable. + EXPECT_TRUE(frameTableData.isValidIndex(frame)); + GET_JSON(frameTableEntry, frameTableData[frame], Array); + GET_JSON_VALUE(location, frameTableEntry[frameTableLocation], UInt); + + // The frameTable entry's "location" points at a string. + EXPECT_TRUE(stringTable.isValidIndex(location)); + + // The stackTable entry's "prefix" is null for the root frame. + if (stackTableEntry[stackTablePrefix].isNull()) { + break; + } + // Otherwise it recursively points at the caller in the stackTable. + GET_JSON_VALUE(prefix, stackTableEntry[stackTablePrefix], UInt); + EXPECT_TRUE(stackTableData.isValidIndex(prefix)); + stack = prefix; + } + } + } + } + } + + if (aWithMainThread) { + ASSERT_GT(threadCount, 0u); + GET_JSON(thread0, threads[0], Object); + EXPECT_EQ_JSON(thread0["name"], String, "GeckoMain"); + } + + EXPECT_HAS_JSON(aRoot["pausedRanges"], Array); + + const Json::Value& processes = aRoot["processes"]; + if (!processes.isNull()) { + ASSERT_TRUE(processes.isArray()); + const Json::ArrayIndex processCount = processes.size(); + for (Json::ArrayIndex i = 0; i < processCount; ++i) { + GET_JSON(process, processes[i], Object); + JSONRootCheck(process, aWithMainThread); + } + } + + GET_JSON(profilingLog, aRoot["profilingLog"], Object); + EXPECT_EQ(profilingLog.size(), 1u); + for (auto it = profilingLog.begin(); it != profilingLog.end(); ++it) { + // The key should be a pid. + const auto key = it.name(); + for (const auto letter : key) { + EXPECT_GE(letter, '0'); + EXPECT_LE(letter, '9'); + } + // And the value should be an object. + GET_JSON(logForPid, profilingLog[key], Object); + // Its content is not defined, but we expect at least these: + EXPECT_HAS_JSON(logForPid["profilingLogBegin_TSms"], Double); + EXPECT_HAS_JSON(logForPid["profilingLogEnd_TSms"], Double); + } +} + +// Check that various expected top properties are in the JSON, and then call the +// provided `aJSONCheckFunction` with the JSON root object. +template <typename JSONCheckFunction> +void JSONOutputCheck(const char* aOutput, + JSONCheckFunction&& aJSONCheckFunction) { + ASSERT_NE(aOutput, nullptr); + + JSONWhitespaceCheck(aOutput); + + // Extract JSON. + Json::Value parsedRoot; + Json::CharReaderBuilder builder; + const std::unique_ptr<Json::CharReader> reader(builder.newCharReader()); + ASSERT_TRUE( + reader->parse(aOutput, strchr(aOutput, '\0'), &parsedRoot, nullptr)); + + JSONRootCheck(parsedRoot); + + std::forward<JSONCheckFunction>(aJSONCheckFunction)(parsedRoot); +} + +// Returns `static_cast<SamplingState>(-1)` if callback could not be installed. 
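// (Aside, not from the patch: the helper below busy-waits on a
// mozilla::Atomic<int> that the sampler flips from a callback. The same
// shape with std::atomic, plus a yield to be gentler on the scheduler --
// a sketch only, hypothetical name:)
#include <atomic>
static int WaitForFlagSketch(const std::atomic<int>& aFlag) {
  int value;
  while ((value = aFlag.load(std::memory_order_acquire)) == -1) {
    std::this_thread::yield();  // <thread> is already included by this file.
  }
  return value;
}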
+static SamplingState WaitForSamplingState() { + Atomic<int> samplingState{-1}; + + if (!profiler_callback_after_sampling([&](SamplingState aSamplingState) { + samplingState = static_cast<int>(aSamplingState); + })) { + return static_cast<SamplingState>(-1); + } + + while (samplingState == -1) { + } + + return static_cast<SamplingState>(static_cast<int>(samplingState)); +} + +typedef Vector<const char*> StrVec; + +static void InactiveFeaturesAndParamsCheck() { + int entries; + Maybe<double> duration; + double interval; + uint32_t features; + StrVec filters; + uint64_t activeTabID; + + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::NativeAllocations)); + + profiler_get_start_params(&entries, &duration, &interval, &features, &filters, + &activeTabID); + + ASSERT_TRUE(entries == 0); + ASSERT_TRUE(duration == Nothing()); + ASSERT_TRUE(interval == 0); + ASSERT_TRUE(features == 0); + ASSERT_TRUE(filters.empty()); + ASSERT_TRUE(activeTabID == 0); +} + +static void ActiveParamsCheck(int aEntries, double aInterval, + uint32_t aFeatures, const char** aFilters, + size_t aFiltersLen, uint64_t aActiveTabID, + const Maybe<double>& aDuration = Nothing()) { + int entries; + Maybe<double> duration; + double interval; + uint32_t features; + StrVec filters; + uint64_t activeTabID; + + profiler_get_start_params(&entries, &duration, &interval, &features, &filters, + &activeTabID); + + ASSERT_TRUE(entries == aEntries); + ASSERT_TRUE(duration == aDuration); + ASSERT_TRUE(interval == aInterval); + ASSERT_TRUE(features == aFeatures); + ASSERT_TRUE(filters.length() == aFiltersLen); + ASSERT_TRUE(activeTabID == aActiveTabID); + for (size_t i = 0; i < aFiltersLen; i++) { + ASSERT_TRUE(strcmp(filters[i], aFilters[i]) == 0); + } +} + +TEST(GeckoProfiler, FeaturesAndParams) +{ + InactiveFeaturesAndParamsCheck(); + + // Try a couple of features and filters. + { + uint32_t features = ProfilerFeature::JS; + const char* filters[] = {"GeckoMain", "Compositor"}; + +# define PROFILER_DEFAULT_DURATION 20 /* seconds, for tests only */ + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 100, + Some(PROFILER_DEFAULT_DURATION)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 100, + Some(PROFILER_DEFAULT_DURATION)); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } + + // Try some different features and filters. + { + uint32_t features = + ProfilerFeature::MainThreadIO | ProfilerFeature::IPCMessages; + const char* filters[] = {"GeckoMain", "Foo", "Bar"}; + + // Testing with some arbitrary buffer size (as could be provided by + // external code), which we convert to the appropriate power of 2. 
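// (Aside, not from the patch: PowerOfTwo32(999999) below snaps the requested
// capacity to a power of two. Assuming round-up semantics, the classic
// bit-smearing version of that conversion looks like this:)
static uint32_t RoundUpToPowerOfTwoSketch(uint32_t aValue) {
  uint32_t v = aValue - 1;  // Assumes 1 < aValue <= (1u << 31).
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  return v + 1;  // e.g. 999999 -> 1048576 (2^20).
}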
+ profiler_start(PowerOfTwo32(999999), 3, features, filters, + MOZ_ARRAY_LENGTH(filters), 123, Some(25.0)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(int(PowerOfTwo32(999999).Value()), 3, features, filters, + MOZ_ARRAY_LENGTH(filters), 123, Some(25.0)); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } + + // Try with no duration + { + uint32_t features = + ProfilerFeature::MainThreadIO | ProfilerFeature::IPCMessages; + const char* filters[] = {"GeckoMain", "Foo", "Bar"}; + + profiler_start(PowerOfTwo32(999999), 3, features, filters, + MOZ_ARRAY_LENGTH(filters), 0, Nothing()); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(int(PowerOfTwo32(999999).Value()), 3, features, filters, + MOZ_ARRAY_LENGTH(filters), 0, Nothing()); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } + + // Try all supported features, and filters that match all threads. + { + uint32_t availableFeatures = profiler_get_available_features(); + const char* filters[] = {""}; + + profiler_start(PowerOfTwo32(88888), 10, availableFeatures, filters, + MOZ_ARRAY_LENGTH(filters), 0, Some(15.0)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PowerOfTwo32(88888).Value(), 10, availableFeatures, + filters, MOZ_ARRAY_LENGTH(filters), 0, Some(15.0)); + + // Don't call profiler_stop() here. + } + + // Try no features, and filters that match no threads. + { + uint32_t features = 0; + const char* filters[] = {"NoThreadWillMatchThis"}; + + // Second profiler_start() call in a row without an intervening + // profiler_stop(); this will do an implicit profiler_stop() and restart. + profiler_start(PowerOfTwo32(0), 0, features, filters, + MOZ_ARRAY_LENGTH(filters), 0, Some(0.0)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::IPCMessages)); + + // Entries and intervals go to defaults if 0 is specified. + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 0, Nothing()); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + + // These calls are no-ops. + profiler_stop(); + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } +} + +TEST(GeckoProfiler, EnsureStarted) +{ + InactiveFeaturesAndParamsCheck(); + + uint32_t features = ProfilerFeature::JS; + const char* filters[] = {"GeckoMain", "Compositor"}; + { + // Inactive -> Active + profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0, + Some(PROFILER_DEFAULT_DURATION)); + + ActiveParamsCheck( + PROFILER_DEFAULT_ENTRIES.Value(), PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0, Some(PROFILER_DEFAULT_DURATION)); + } + + { + // Active -> Active with same settings + + Maybe<ProfilerBufferInfo> info0 = profiler_get_buffer_info(); + ASSERT_TRUE(info0->mRangeEnd > 0); + + // First, write some samples into the buffer. 
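+    // (Sleeping on the main thread gives the periodic sampler time to run at
+    // PROFILER_DEFAULT_INTERVAL, which should advance the buffer's mRangeEnd.)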
+    PR_Sleep(PR_MillisecondsToInterval(500));
+
+    Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info();
+    ASSERT_TRUE(info1->mRangeEnd > info0->mRangeEnd);
+
+    // Call profiler_ensure_started with the same settings as before.
+    // This operation must not clear our buffer!
+    profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL,
+                            features, filters, MOZ_ARRAY_LENGTH(filters), 0,
+                            Some(PROFILER_DEFAULT_DURATION));
+
+    ActiveParamsCheck(
+        PROFILER_DEFAULT_ENTRIES.Value(), PROFILER_DEFAULT_INTERVAL, features,
+        filters, MOZ_ARRAY_LENGTH(filters), 0, Some(PROFILER_DEFAULT_DURATION));
+
+    // Check that our position in the buffer stayed the same or advanced, but
+    // not by much, and the range-start after profiler_ensure_started shouldn't
+    // have passed the range-end before.
+    Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info();
+    ASSERT_TRUE(info2->mRangeEnd >= info1->mRangeEnd);
+    ASSERT_TRUE(info2->mRangeEnd - info1->mRangeEnd <
+                info1->mRangeEnd - info0->mRangeEnd);
+    ASSERT_TRUE(info2->mRangeStart < info1->mRangeEnd);
+  }
+
+  {
+    // Active -> Active with *different* settings
+
+    Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info();
+
+    // Call profiler_ensure_started with a different feature set than the one
+    // it's currently running with. This is supposed to stop and restart the
+    // profiler, thereby discarding the buffer contents.
+    uint32_t differentFeatures = features | ProfilerFeature::CPUUtilization;
+    profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL,
+                            differentFeatures, filters,
+                            MOZ_ARRAY_LENGTH(filters), 0);
+
+    ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(),
+                      PROFILER_DEFAULT_INTERVAL, differentFeatures, filters,
+                      MOZ_ARRAY_LENGTH(filters), 0);
+
+    // Check that the buffer was cleared: its range-start should now be
+    // at/after its previous range-end.
+    Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info();
+    ASSERT_TRUE(info2->mRangeStart >= info1->mRangeEnd);
+  }
+
+  {
+    // Active -> Inactive
+
+    profiler_stop();
+
+    InactiveFeaturesAndParamsCheck();
+  }
+}
+
+TEST(GeckoProfiler, MultiRegistration)
+{
+  // This whole test only checks that these function calls don't crash; it
+  // doesn't actually verify whether threads get profiled or not.
+
+  {
+    std::thread thread([]() {
+      char top;
+      profiler_register_thread("thread, no unreg", &top);
+    });
+    thread.join();
+  }
+
+  {
+    std::thread thread([]() { profiler_unregister_thread(); });
+    thread.join();
+  }
+
+  {
+    std::thread thread([]() {
+      char top;
+      profiler_register_thread("thread 1st", &top);
+      profiler_unregister_thread();
+      profiler_register_thread("thread 2nd", &top);
+      profiler_unregister_thread();
+    });
+    thread.join();
+  }
+
+  {
+    std::thread thread([]() {
+      char top;
+      profiler_register_thread("thread once", &top);
+      profiler_register_thread("thread again", &top);
+      profiler_unregister_thread();
+    });
+    thread.join();
+  }
+
+  {
+    std::thread thread([]() {
+      char top;
+      profiler_register_thread("thread to unreg twice", &top);
+      profiler_unregister_thread();
+      profiler_unregister_thread();
+    });
+    thread.join();
+  }
+}
+
+TEST(GeckoProfiler, DifferentThreads)
+{
+  InactiveFeaturesAndParamsCheck();
+
+  nsCOMPtr<nsIThread> thread;
+  nsresult rv = NS_NewNamedThread("GeckoProfGTest", getter_AddRefs(thread));
+  ASSERT_NS_SUCCEEDED(rv);
+
+  // Control the profiler on a background thread and verify flags on the
+  // main thread.
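+  // (Profiler state is process-wide, so starting/stopping from any thread
+  // should be immediately observable from every other thread.)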
+ { + uint32_t features = ProfilerFeature::JS; + const char* filters[] = {"GeckoMain", "Compositor"}; + + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_DifferentThreads_Test::TestBody"_ns, thread, + NS_NewRunnableFunction( + "GeckoProfiler_DifferentThreads_Test::TestBody", [&]() { + profiler_start(PROFILER_DEFAULT_ENTRIES, + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 0); + })); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 0); + + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_DifferentThreads_Test::TestBody"_ns, thread, + NS_NewRunnableFunction("GeckoProfiler_DifferentThreads_Test::TestBody", + [&]() { profiler_stop(); })); + + InactiveFeaturesAndParamsCheck(); + } + + // Control the profiler on the main thread and verify flags on a + // background thread. + { + uint32_t features = ProfilerFeature::JS; + const char* filters[] = {"GeckoMain", "Compositor"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_DifferentThreads_Test::TestBody"_ns, thread, + NS_NewRunnableFunction( + "GeckoProfiler_DifferentThreads_Test::TestBody", [&]() { + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE( + !profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE( + !profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 0); + })); + + profiler_stop(); + + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_DifferentThreads_Test::TestBody"_ns, thread, + NS_NewRunnableFunction("GeckoProfiler_DifferentThreads_Test::TestBody", + [&]() { InactiveFeaturesAndParamsCheck(); })); + } + + thread->Shutdown(); +} + +TEST(GeckoProfiler, GetBacktrace) +{ + ASSERT_TRUE(!profiler_get_backtrace()); + + { + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + // These will be destroyed while the profiler is active. + static const int N = 100; + { + UniqueProfilerBacktrace u[N]; + for (int i = 0; i < N; i++) { + u[i] = profiler_get_backtrace(); + ASSERT_TRUE(u[i]); + } + } + + // These will be destroyed after the profiler stops. 
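+    // (This exercises UniqueProfilerBacktrace destruction both while the
+    // profiler is active and after profiler_stop(); like most tests in this
+    // file, the main check is that neither case crashes.)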
+ UniqueProfilerBacktrace u[N]; + for (int i = 0; i < N; i++) { + u[i] = profiler_get_backtrace(); + ASSERT_TRUE(u[i]); + } + + profiler_stop(); + } + + ASSERT_TRUE(!profiler_get_backtrace()); +} + +TEST(GeckoProfiler, Pause) +{ + profiler_init_main_thread_id(); + ASSERT_TRUE(profiler_is_main_thread()) + << "This test must run on the main thread"; + + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain", "Profiled GeckoProfiler.Pause"}; + + ASSERT_TRUE(!profiler_is_paused()); + for (ThreadProfilingFeatures features : scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled(profiler_current_thread_id(), + features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled(profiler_main_thread_id(), + features)); + } + + std::thread{[&]() { + { + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_main_thread_id(), features)); + } + } + { + AUTO_PROFILER_REGISTER_THREAD( + "Ignored GeckoProfiler.Pause - before start"); + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_main_thread_id(), features)); + } + } + { + AUTO_PROFILER_REGISTER_THREAD( + "Profiled GeckoProfiler.Pause - before start"); + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_main_thread_id(), features)); + } + } + }}.join(); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(!profiler_is_paused()); + for (ThreadProfilingFeatures features : scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_current_thread_id(), + features)); + } + + std::thread{[&]() { + { + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_main_thread_id(), + features)); + } + } + { + AUTO_PROFILER_REGISTER_THREAD( + "Ignored GeckoProfiler.Pause - after start"); + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + 
ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_main_thread_id(), + features)); + } + } + { + AUTO_PROFILER_REGISTER_THREAD( + "Profiled GeckoProfiler.Pause - after start"); + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_main_thread_id(), + features)); + } + } + }}.join(); + + // Check that we are writing samples while not paused. + Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info(); + PR_Sleep(PR_MillisecondsToInterval(500)); + Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd != info2->mRangeEnd); + + // Check that we are writing markers while not paused. + ASSERT_TRUE(profiler_thread_is_being_profiled_for_markers()); + ASSERT_TRUE( + profiler_thread_is_being_profiled_for_markers(ProfilerThreadId{})); + ASSERT_TRUE(profiler_thread_is_being_profiled_for_markers( + profiler_current_thread_id())); + ASSERT_TRUE( + profiler_thread_is_being_profiled_for_markers(profiler_main_thread_id())); + info1 = profiler_get_buffer_info(); + PROFILER_MARKER_UNTYPED("Not paused", OTHER, {}); + info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd != info2->mRangeEnd); + + profiler_pause(); + + ASSERT_TRUE(profiler_is_paused()); + for (ThreadProfilingFeatures features : scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled(profiler_current_thread_id(), + features)); + } + ASSERT_TRUE(!profiler_thread_is_being_profiled_for_markers()); + ASSERT_TRUE( + !profiler_thread_is_being_profiled_for_markers(ProfilerThreadId{})); + ASSERT_TRUE(!profiler_thread_is_being_profiled_for_markers( + profiler_current_thread_id())); + ASSERT_TRUE(!profiler_thread_is_being_profiled_for_markers( + profiler_main_thread_id())); + + std::thread{[&]() { + { + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_main_thread_id(), features)); + } + } + { + AUTO_PROFILER_REGISTER_THREAD( + "Ignored GeckoProfiler.Pause - after pause"); + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) { + ASSERT_TRUE(!profiler_thread_is_being_profiled(features)); + ASSERT_TRUE( + !profiler_thread_is_being_profiled(ProfilerThreadId{}, features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_current_thread_id(), features)); + ASSERT_TRUE(!profiler_thread_is_being_profiled( + profiler_main_thread_id(), features)); + } + } + { + AUTO_PROFILER_REGISTER_THREAD( + "Profiled GeckoProfiler.Pause - after pause"); + for (ThreadProfilingFeatures features : + scEachAndAnyThreadProfilingFeatures) 
{
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_main_thread_id(), features));
+      }
+    }
+  }}.join();
+
+  // Check that we are not writing samples while paused.
+  info1 = profiler_get_buffer_info();
+  PR_Sleep(PR_MillisecondsToInterval(500));
+  info2 = profiler_get_buffer_info();
+  ASSERT_TRUE(info1->mRangeEnd == info2->mRangeEnd);
+
+  // Check that we are not writing markers while paused either: adding markers
+  // should leave the buffer's range-end unchanged.
+  info1 = profiler_get_buffer_info();
+  PROFILER_MARKER_UNTYPED("Paused", OTHER, {});
+  info2 = profiler_get_buffer_info();
+  ASSERT_TRUE(info1->mRangeEnd == info2->mRangeEnd);
+  PROFILER_MARKER_UNTYPED("Paused v2", OTHER, {});
+  Maybe<ProfilerBufferInfo> info3 = profiler_get_buffer_info();
+  ASSERT_TRUE(info2->mRangeEnd == info3->mRangeEnd);
+
+  profiler_resume();
+
+  ASSERT_TRUE(!profiler_is_paused());
+  for (ThreadProfilingFeatures features : scEachAndAnyThreadProfilingFeatures) {
+    ASSERT_TRUE(profiler_thread_is_being_profiled(features));
+    ASSERT_TRUE(
+        profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+    ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_current_thread_id(),
+                                                  features));
+  }
+
+  std::thread{[&]() {
+    {
+      for (ThreadProfilingFeatures features :
+           scEachAndAnyThreadProfilingFeatures) {
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_main_thread_id(),
+                                                      features));
+      }
+    }
+    {
+      AUTO_PROFILER_REGISTER_THREAD(
+          "Ignored GeckoProfiler.Pause - after resume");
+      for (ThreadProfilingFeatures features :
+           scEachAndAnyThreadProfilingFeatures) {
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_main_thread_id(),
+                                                      features));
+      }
+    }
+    {
+      AUTO_PROFILER_REGISTER_THREAD(
+          "Profiled GeckoProfiler.Pause - after resume");
+      for (ThreadProfilingFeatures features :
+           scEachAndAnyThreadProfilingFeatures) {
+        ASSERT_TRUE(profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(profiler_thread_is_being_profiled(profiler_main_thread_id(),
+                                                      features));
+      }
+    }
+  }}.join();
+
+  profiler_stop();
+
+  ASSERT_TRUE(!profiler_is_paused());
+  for (ThreadProfilingFeatures features : scEachAndAnyThreadProfilingFeatures) {
+    ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+    ASSERT_TRUE(
+        !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+    ASSERT_TRUE(!profiler_thread_is_being_profiled(profiler_current_thread_id(),
+                                                   features));
+  }
+
+  std::thread{[&]() {
+    {
+      for (ThreadProfilingFeatures features :
+           scEachAndAnyThreadProfilingFeatures) {
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_main_thread_id(), features));
+      }
+    }
+    {
+      AUTO_PROFILER_REGISTER_THREAD("Ignored GeckoProfiler.Pause - after stop");
+      for (ThreadProfilingFeatures features :
+           scEachAndAnyThreadProfilingFeatures) {
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_main_thread_id(), features));
+      }
+    }
+    {
+      AUTO_PROFILER_REGISTER_THREAD(
+          "Profiled GeckoProfiler.Pause - after stop");
+      for (ThreadProfilingFeatures features :
+           scEachAndAnyThreadProfilingFeatures) {
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(features));
+        ASSERT_TRUE(
+            !profiler_thread_is_being_profiled(ProfilerThreadId{}, features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_current_thread_id(), features));
+        ASSERT_TRUE(!profiler_thread_is_being_profiled(
+            profiler_main_thread_id(), features));
+      }
+    }
+  }}.join();
+}
+
+TEST(GeckoProfiler, Markers)
+{
+  uint32_t features = ProfilerFeature::StackWalk;
+  const char* filters[] = {"GeckoMain"};
+
+  profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features,
+                 filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+  PROFILER_MARKER("tracing event", OTHER, {}, Tracing, "A");
+  PROFILER_MARKER("tracing start", OTHER, MarkerTiming::IntervalStart(),
+                  Tracing, "A");
+  PROFILER_MARKER("tracing end", OTHER, MarkerTiming::IntervalEnd(), Tracing,
+                  "A");
+
+  auto bt = profiler_capture_backtrace();
+  PROFILER_MARKER("tracing event with stack", OTHER,
+                  MarkerStack::TakeBacktrace(std::move(bt)), Tracing, "B");
+
+  { AUTO_PROFILER_TRACING_MARKER("C", "auto tracing", OTHER); }
+
+  PROFILER_MARKER_UNTYPED("M1", OTHER, {});
+  PROFILER_MARKER_UNTYPED("M3", OTHER, {});
+
+  // Create four strings: two that are the maximum allowed length, one that is
+  // one char longer, and a copy of the long one that is edited below into its
+  // expected truncated form.
+  static const size_t kMax = ProfileBuffer::kMaxFrameKeyLength;
+  UniquePtr<char[]> okstr1 = MakeUnique<char[]>(kMax);
+  UniquePtr<char[]> okstr2 = MakeUnique<char[]>(kMax);
+  UniquePtr<char[]> longstr = MakeUnique<char[]>(kMax + 1);
+  UniquePtr<char[]> longstrCut = MakeUnique<char[]>(kMax + 1);
+  for (size_t i = 0; i < kMax; i++) {
+    okstr1[i] = 'a';
+    okstr2[i] = 'b';
+    longstr[i] = 'c';
+    longstrCut[i] = 'c';
+  }
+  okstr1[kMax - 1] = '\0';
+  okstr2[kMax - 1] = '\0';
+  longstr[kMax] = '\0';
+  longstrCut[kMax] = '\0';
+  // Should be output as-is.
+  AUTO_PROFILER_LABEL_DYNAMIC_CSTR("", LAYOUT, "");
+  AUTO_PROFILER_LABEL_DYNAMIC_CSTR("", LAYOUT, okstr1.get());
+  // Should be output as label + space + okstr2.
+  AUTO_PROFILER_LABEL_DYNAMIC_CSTR("okstr2", LAYOUT, okstr2.get());
+  // Should be output with kMax length, ending with "...\0".
+  AUTO_PROFILER_LABEL_DYNAMIC_CSTR("", LAYOUT, longstr.get());
+  ASSERT_EQ(longstrCut[kMax - 4], 'c');
+  longstrCut[kMax - 4] = '.';
+  ASSERT_EQ(longstrCut[kMax - 3], 'c');
+  longstrCut[kMax - 3] = '.';
+  ASSERT_EQ(longstrCut[kMax - 2], 'c');
+  longstrCut[kMax - 2] = '.';
+  ASSERT_EQ(longstrCut[kMax - 1], 'c');
+  longstrCut[kMax - 1] = '\0';
+
+  // Test basic markers 2.0.
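+  // (In this API, a marker is added with
+  //   profiler_add_marker(name, category, options[, MarkerType, payload...]);
+  // the PROFILER_MARKER* macros used below are thin wrappers around it.)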
+  EXPECT_TRUE(
+      profiler_add_marker("default-templated markers 2.0 with empty options",
+                          geckoprofiler::category::OTHER, {}));
+
+  PROFILER_MARKER_UNTYPED(
+      "default-templated markers 2.0 with option", OTHER,
+      MarkerStack::TakeBacktrace(profiler_capture_backtrace()));
+
+  PROFILER_MARKER("explicitly-default-templated markers 2.0 with empty options",
+                  OTHER, {}, NoPayload);
+
+  EXPECT_TRUE(profiler_add_marker(
+      "explicitly-default-templated markers 2.0 with option",
+      geckoprofiler::category::OTHER, {},
+      ::geckoprofiler::markers::NoPayload{}));
+
+  // Used in markers below.
+  TimeStamp ts1 = TimeStamp::Now();
+
+  // Sleep briefly to ensure a sample is taken and the pending markers are
+  // processed.
+  PR_Sleep(PR_MillisecondsToInterval(500));
+
+  // Used in markers below.
+  TimeStamp ts2 = TimeStamp::Now();
+  // ts1 and ts2 should be different thanks to the sleep.
+  EXPECT_NE(ts1, ts2);
+
+  // Test most marker payloads.
+
+  // Keep this one first! (It's used to record `ts1` and `ts2`, to compare
+  // to serialized numbers in other markers.)
+  EXPECT_TRUE(profiler_add_marker("FirstMarker", geckoprofiler::category::OTHER,
+                                  MarkerTiming::Interval(ts1, ts2),
+                                  geckoprofiler::markers::TextMarker{},
+                                  "First Marker"));
+
+  // User-defined marker type with different properties, and fake schema.
+  struct GtestMarker {
+    static constexpr Span<const char> MarkerTypeName() {
+      return MakeStringSpan("markers-gtest");
+    }
+    static void StreamJSONMarkerData(
+        mozilla::baseprofiler::SpliceableJSONWriter& aWriter, int aInt,
+        double aDouble, const mozilla::ProfilerString8View& aText,
+        const mozilla::ProfilerString8View& aUniqueText,
+        const mozilla::TimeStamp& aTime) {
+      aWriter.NullProperty("null");
+      aWriter.BoolProperty("bool-false", false);
+      aWriter.BoolProperty("bool-true", true);
+      aWriter.IntProperty("int", aInt);
+      aWriter.DoubleProperty("double", aDouble);
+      aWriter.StringProperty("text", aText);
+      aWriter.UniqueStringProperty("unique text", aUniqueText);
+      aWriter.UniqueStringProperty("unique text again", aUniqueText);
+      aWriter.TimeProperty("time", aTime);
+    }
+    static mozilla::MarkerSchema MarkerTypeDisplay() {
+      // Note: This is a test function whose output is not intended to
+      // correctly match the StreamJSONMarkerData data above! We only test
+      // that it outputs the expected JSON at the end.
+      using MS = mozilla::MarkerSchema;
+      MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable,
+                MS::Location::TimelineOverview, MS::Location::TimelineMemory,
+                MS::Location::TimelineIPC, MS::Location::TimelineFileIO,
+                MS::Location::StackChart};
+      // All label functions.
+      schema.SetChartLabel("chart label");
+      schema.SetTooltipLabel("tooltip label");
+      schema.SetTableLabel("table label");
+      // All data functions, all formats, all "searchable" values.
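+      // (Each AddKey* call below becomes one object in this schema's "data"
+      // array; the checks near the end of this test verify them one by one.)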
+ schema.AddKeyFormat("key with url", MS::Format::Url); + schema.AddKeyLabelFormat("key with label filePath", "label filePath", + MS::Format::FilePath); + schema.AddKeyFormatSearchable("key with string not-searchable", + MS::Format::String, + MS::Searchable::NotSearchable); + schema.AddKeyLabelFormatSearchable("key with label duration searchable", + "label duration", MS::Format::Duration, + MS::Searchable::Searchable); + schema.AddKeyFormat("key with time", MS::Format::Time); + schema.AddKeyFormat("key with seconds", MS::Format::Seconds); + schema.AddKeyFormat("key with milliseconds", MS::Format::Milliseconds); + schema.AddKeyFormat("key with microseconds", MS::Format::Microseconds); + schema.AddKeyFormat("key with nanoseconds", MS::Format::Nanoseconds); + schema.AddKeyFormat("key with bytes", MS::Format::Bytes); + schema.AddKeyFormat("key with percentage", MS::Format::Percentage); + schema.AddKeyFormat("key with integer", MS::Format::Integer); + schema.AddKeyFormat("key with decimal", MS::Format::Decimal); + schema.AddStaticLabelValue("static label", "static value"); + return schema; + } + }; + EXPECT_TRUE( + profiler_add_marker("Gtest custom marker", geckoprofiler::category::OTHER, + MarkerTiming::Interval(ts1, ts2), GtestMarker{}, 42, + 43.0, "gtest text", "gtest unique text", ts1)); + + // User-defined marker type with no data, special frontend schema. + struct GtestSpecialMarker { + static constexpr Span<const char> MarkerTypeName() { + return MakeStringSpan("markers-gtest-special"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter) {} + static mozilla::MarkerSchema MarkerTypeDisplay() { + return mozilla::MarkerSchema::SpecialFrontendLocation{}; + } + }; + EXPECT_TRUE(profiler_add_marker("Gtest special marker", + geckoprofiler::category::OTHER, {}, + GtestSpecialMarker{})); + + // User-defined marker type that is never used, so it shouldn't appear in the + // output. + struct GtestUnusedMarker { + static constexpr Span<const char> MarkerTypeName() { + return MakeStringSpan("markers-gtest-unused"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter) {} + static mozilla::MarkerSchema MarkerTypeDisplay() { + return mozilla::MarkerSchema::SpecialFrontendLocation{}; + } + }; + + // Make sure the compiler doesn't complain about this unused struct. + mozilla::Unused << GtestUnusedMarker{}; + + // Other markers in alphabetical order of payload class names. + + nsCOMPtr<nsIURI> uri; + ASSERT_TRUE( + NS_SUCCEEDED(NS_NewURI(getter_AddRefs(uri), "http://mozilla.org/"_ns))); + // The marker name will be "Load <aChannelId>: <aURI>". 
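+  // (e.g. the LOAD_START marker just below should be serialized with the name
+  // "Load 1: http://mozilla.org/", which is what the JSON checks further down
+  // in this test look for.)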
+ profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 1, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_START, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheHit, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ false + /* const mozilla::net::TimingStruct* aTimings = nullptr */ + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + /* nsIURI* aRedirectURI = nullptr */ + /* uint64_t aRedirectChannelId = 0 */ + ); + + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 2, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_STOP, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ false, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + nullptr, + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + Some(nsDependentCString("text/html")), + /* nsIURI* aRedirectURI = nullptr */ nullptr, + /* uint64_t aRedirectChannelId = 0 */ 0); + + nsCOMPtr<nsIURI> redirectURI; + ASSERT_TRUE(NS_SUCCEEDED( + NS_NewURI(getter_AddRefs(redirectURI), "http://example.com/"_ns))); + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 3, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_REDIRECT, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ false, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + nullptr, + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + mozilla::Nothing(), + /* nsIURI* aRedirectURI = nullptr */ redirectURI, + /* uint32_t aRedirectFlags = 0 */ + nsIChannelEventSink::REDIRECT_TEMPORARY, + /* uint64_t aRedirectChannelId = 0 */ 103); + + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 4, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_REDIRECT, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ false, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + nullptr, + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + mozilla::Nothing(), + /* nsIURI* aRedirectURI = nullptr */ redirectURI, 
+ /* uint32_t aRedirectFlags = 0 */ + nsIChannelEventSink::REDIRECT_PERMANENT, + /* uint64_t aRedirectChannelId = 0 */ 104); + + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 5, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_REDIRECT, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ false, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + nullptr, + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + mozilla::Nothing(), + /* nsIURI* aRedirectURI = nullptr */ redirectURI, + /* uint32_t aRedirectFlags = 0 */ nsIChannelEventSink::REDIRECT_INTERNAL, + /* uint64_t aRedirectChannelId = 0 */ 105); + + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 6, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_REDIRECT, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ false, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + nullptr, + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + mozilla::Nothing(), + /* nsIURI* aRedirectURI = nullptr */ redirectURI, + /* uint32_t aRedirectFlags = 0 */ nsIChannelEventSink::REDIRECT_INTERNAL | + nsIChannelEventSink::REDIRECT_STS_UPGRADE, + /* uint64_t aRedirectChannelId = 0 */ 106); + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 7, + /* NetworkLoadType aType */ net::NetworkLoadType::LOAD_START, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* bool aIsPrivateBrowsing */ true + /* const mozilla::net::TimingStruct* aTimings = nullptr */ + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + /* nsIURI* aRedirectURI = nullptr */ + /* uint64_t aRedirectChannelId = 0 */ + ); + + EXPECT_TRUE(profiler_add_marker( + "Text in main thread with stack", geckoprofiler::category::OTHER, + {MarkerStack::Capture(), MarkerTiming::Interval(ts1, ts2)}, + geckoprofiler::markers::TextMarker{}, "")); + EXPECT_TRUE(profiler_add_marker( + "Text from main thread with stack", geckoprofiler::category::OTHER, + MarkerOptions(MarkerThreadId::MainThread(), MarkerStack::Capture()), + geckoprofiler::markers::TextMarker{}, "")); + + std::thread registeredThread([]() { + AUTO_PROFILER_REGISTER_THREAD("Marker test sub-thread"); + // Marker in non-profiled thread won't be stored. 
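+    // (The filters above only match "GeckoMain", so this registered sub-thread
+    // is not being profiled, and profiler_add_marker() targeting it is
+    // expected to return false.)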
+    EXPECT_FALSE(profiler_add_marker(
+        "Text in registered thread with stack", geckoprofiler::category::OTHER,
+        MarkerStack::Capture(), geckoprofiler::markers::TextMarker{}, ""));
+    // Marker will be stored in main thread, with stack from registered thread.
+    EXPECT_TRUE(profiler_add_marker(
+        "Text from registered thread with stack",
+        geckoprofiler::category::OTHER,
+        MarkerOptions(MarkerThreadId::MainThread(), MarkerStack::Capture()),
+        geckoprofiler::markers::TextMarker{}, ""));
+  });
+  registeredThread.join();
+
+  std::thread unregisteredThread([]() {
+    // Marker in unregistered thread won't be stored.
+    EXPECT_FALSE(profiler_add_marker("Text in unregistered thread with stack",
+                                     geckoprofiler::category::OTHER,
+                                     MarkerStack::Capture(),
+                                     geckoprofiler::markers::TextMarker{}, ""));
+    // Marker will be stored in main thread, but stack cannot be captured in an
+    // unregistered thread.
+    EXPECT_TRUE(profiler_add_marker(
+        "Text from unregistered thread with stack",
+        geckoprofiler::category::OTHER,
+        MarkerOptions(MarkerThreadId::MainThread(), MarkerStack::Capture()),
+        geckoprofiler::markers::TextMarker{}, ""));
+  });
+  unregisteredThread.join();
+
+  EXPECT_TRUE(profiler_add_marker("Tracing", geckoprofiler::category::OTHER, {},
+                                  geckoprofiler::markers::Tracing{},
+                                  "category"));
+
+  EXPECT_TRUE(profiler_add_marker("Text", geckoprofiler::category::OTHER, {},
+                                  geckoprofiler::markers::TextMarker{},
+                                  "Text text"));
+
+  // Ensure that we evaluate to false for markers with very long texts by
+  // testing against a ~3MB string. A string of this size should exceed the
+  // available buffer chunks (max: 2) and be discarded.
+  EXPECT_FALSE(profiler_add_marker("Text", geckoprofiler::category::OTHER, {},
+                                   geckoprofiler::markers::TextMarker{},
+                                   std::string(3 * 1024 * 1024, 'x')));
+
+  EXPECT_TRUE(profiler_add_marker(
+      "MediaSample", geckoprofiler::category::OTHER, {},
+      geckoprofiler::markers::MediaSampleMarker{}, 123, 456, 789));
+
+  SpliceableChunkedJSONWriter w{FailureLatchInfallibleSource::Singleton()};
+  w.Start();
+  EXPECT_TRUE(::profiler_stream_json_for_this_process(w).isOk());
+  w.End();
+
+  EXPECT_FALSE(w.Failed());
+
+  UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData();
+  ASSERT_TRUE(!!profile.get());
+
+  // Expected markers, in order.
+  enum State {
+    S_tracing_event,
+    S_tracing_start,
+    S_tracing_end,
+    S_tracing_event_with_stack,
+    S_tracing_auto_tracing_start,
+    S_tracing_auto_tracing_end,
+    S_M1,
+    S_M3,
+    S_Markers2DefaultEmptyOptions,
+    S_Markers2DefaultWithOptions,
+    S_Markers2ExplicitDefaultEmptyOptions,
+    S_Markers2ExplicitDefaultWithOptions,
+    S_FirstMarker,
+    S_CustomMarker,
+    S_SpecialMarker,
+    S_NetworkMarkerPayload_start,
+    S_NetworkMarkerPayload_stop,
+    S_NetworkMarkerPayload_redirect_temporary,
+    S_NetworkMarkerPayload_redirect_permanent,
+    S_NetworkMarkerPayload_redirect_internal,
+    S_NetworkMarkerPayload_redirect_internal_sts,
+    S_NetworkMarkerPayload_private_browsing,
+
+    S_TextWithStack,
+    S_TextToMTWithStack,
+    S_RegThread_TextToMTWithStack,
+    S_UnregThread_TextToMTWithStack,
+
+    S_LAST,
+  } state = State(0);
+
+  // These will be set when first read from S_FirstMarker, then
+  // compared in following markers.
+  // TODO: Compute these values from the timestamps.
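+  // ("FirstMarker" was recorded with MarkerTiming::Interval(ts1, ts2) above,
+  // so its serialized startTime/endTime are the profile-relative doubles that
+  // correspond to ts1 and ts2.)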
+ double ts1Double = 0.0; + double ts2Double = 0.0; + + JSONOutputCheck(profile.get(), [&](const Json::Value& root) { + { + GET_JSON(threads, root["threads"], Array); + ASSERT_EQ(threads.size(), 1u); + + { + GET_JSON(thread0, threads[0], Object); + + // Keep a reference to the string table in this block, it will be used + // below. + GET_JSON(stringTable, thread0["stringTable"], Array); + ASSERT_TRUE(stringTable.isArray()); + + // Test the expected labels in the string table. + bool foundEmpty = false; + bool foundOkstr1 = false; + bool foundOkstr2 = false; + const std::string okstr2Label = std::string("okstr2 ") + okstr2.get(); + bool foundTooLong = false; + for (const auto& s : stringTable) { + ASSERT_TRUE(s.isString()); + std::string sString = s.asString(); + if (sString.empty()) { + EXPECT_FALSE(foundEmpty); + foundEmpty = true; + } else if (sString == okstr1.get()) { + EXPECT_FALSE(foundOkstr1); + foundOkstr1 = true; + } else if (sString == okstr2Label) { + EXPECT_FALSE(foundOkstr2); + foundOkstr2 = true; + } else if (sString == longstrCut.get()) { + EXPECT_FALSE(foundTooLong); + foundTooLong = true; + } else { + EXPECT_NE(sString, longstr.get()); + } + } + EXPECT_TRUE(foundEmpty); + EXPECT_TRUE(foundOkstr1); + EXPECT_TRUE(foundOkstr2); + EXPECT_TRUE(foundTooLong); + + { + GET_JSON(markers, thread0["markers"], Object); + + { + GET_JSON(data, markers["data"], Array); + + for (const Json::Value& marker : data) { + // Name the indexes into the marker tuple: + // [name, startTime, endTime, phase, category, payload] + const unsigned int NAME = 0u; + const unsigned int START_TIME = 1u; + const unsigned int END_TIME = 2u; + const unsigned int PHASE = 3u; + const unsigned int CATEGORY = 4u; + const unsigned int PAYLOAD = 5u; + + const unsigned int PHASE_INSTANT = 0; + const unsigned int PHASE_INTERVAL = 1; + const unsigned int PHASE_START = 2; + const unsigned int PHASE_END = 3; + + const unsigned int SIZE_WITHOUT_PAYLOAD = 5u; + const unsigned int SIZE_WITH_PAYLOAD = 6u; + + ASSERT_TRUE(marker.isArray()); + // The payload is optional. + ASSERT_GE(marker.size(), SIZE_WITHOUT_PAYLOAD); + ASSERT_LE(marker.size(), SIZE_WITH_PAYLOAD); + + // root.threads[0].markers.data[i] is an array with 5 or 6 + // elements. 
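+            // e.g. [nameStringIndex, startTime, endTime, phase, categoryIndex]
+            // or, with a payload:
+            // [nameStringIndex, startTime, endTime, phase, categoryIndex,
+            //  payloadObject]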
+ + ASSERT_TRUE(marker[NAME].isUInt()); // name id + GET_JSON(name, stringTable[marker[NAME].asUInt()], String); + std::string nameString = name.asString(); + + EXPECT_TRUE(marker[START_TIME].isNumeric()); + EXPECT_TRUE(marker[END_TIME].isNumeric()); + EXPECT_TRUE(marker[PHASE].isUInt()); + EXPECT_TRUE(marker[PHASE].asUInt() < 4); + EXPECT_TRUE(marker[CATEGORY].isUInt()); + +# define EXPECT_TIMING_INSTANT \ + EXPECT_NE(marker[START_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INSTANT); +# define EXPECT_TIMING_INTERVAL \ + EXPECT_NE(marker[START_TIME].asDouble(), 0); \ + EXPECT_NE(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INTERVAL); +# define EXPECT_TIMING_START \ + EXPECT_NE(marker[START_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_START); +# define EXPECT_TIMING_END \ + EXPECT_EQ(marker[START_TIME].asDouble(), 0); \ + EXPECT_NE(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_END); + +# define EXPECT_TIMING_INSTANT_AT(t) \ + EXPECT_EQ(marker[START_TIME].asDouble(), t); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INSTANT); +# define EXPECT_TIMING_INTERVAL_AT(start, end) \ + EXPECT_EQ(marker[START_TIME].asDouble(), start); \ + EXPECT_EQ(marker[END_TIME].asDouble(), end); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INTERVAL); +# define EXPECT_TIMING_START_AT(start) \ + EXPECT_EQ(marker[START_TIME].asDouble(), start); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_START); +# define EXPECT_TIMING_END_AT(end) \ + EXPECT_EQ(marker[START_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[END_TIME].asDouble(), end); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_END); + + if (marker.size() == SIZE_WITHOUT_PAYLOAD) { + // root.threads[0].markers.data[i] is an array with 5 elements, + // so there is no payload. + if (nameString == "M1") { + ASSERT_EQ(state, S_M1); + state = State(state + 1); + } else if (nameString == "M3") { + ASSERT_EQ(state, S_M3); + state = State(state + 1); + } else if (nameString == + "default-templated markers 2.0 with empty options") { + EXPECT_EQ(state, S_Markers2DefaultEmptyOptions); + state = State(S_Markers2DefaultEmptyOptions + 1); +// TODO: Re-enable this when bug 1646714 lands, and check for stack. +# if 0 + } else if (nameString == + "default-templated markers 2.0 with option") { + EXPECT_EQ(state, S_Markers2DefaultWithOptions); + state = State(S_Markers2DefaultWithOptions + 1); +# endif + } else if (nameString == + "explicitly-default-templated markers 2.0 with " + "empty " + "options") { + EXPECT_EQ(state, S_Markers2ExplicitDefaultEmptyOptions); + state = State(S_Markers2ExplicitDefaultEmptyOptions + 1); + } else if (nameString == + "explicitly-default-templated markers 2.0 with " + "option") { + EXPECT_EQ(state, S_Markers2ExplicitDefaultWithOptions); + state = State(S_Markers2ExplicitDefaultWithOptions + 1); + } + } else { + // root.threads[0].markers.data[i] is an array with 6 elements, + // so there is a payload. + GET_JSON(payload, marker[PAYLOAD], Object); + + // root.threads[0].markers.data[i][PAYLOAD] is an object + // (payload). + + // It should at least have a "type" string. 
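+            // (Hypothetical shape, for illustration:
+            //   { "type": "tracing", "category": "A", "stack": null }
+            // The exact fields depend on the marker type.)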
+ GET_JSON(type, payload["type"], String); + std::string typeString = type.asString(); + + if (nameString == "tracing event") { + EXPECT_EQ(state, S_tracing_event); + state = State(S_tracing_event + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_INSTANT; + EXPECT_EQ_JSON(payload["category"], String, "A"); + EXPECT_TRUE(payload["stack"].isNull()); + + } else if (nameString == "tracing start") { + EXPECT_EQ(state, S_tracing_start); + state = State(S_tracing_start + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_START; + EXPECT_EQ_JSON(payload["category"], String, "A"); + EXPECT_TRUE(payload["stack"].isNull()); + + } else if (nameString == "tracing end") { + EXPECT_EQ(state, S_tracing_end); + state = State(S_tracing_end + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_END; + EXPECT_EQ_JSON(payload["category"], String, "A"); + EXPECT_TRUE(payload["stack"].isNull()); + + } else if (nameString == "tracing event with stack") { + EXPECT_EQ(state, S_tracing_event_with_stack); + state = State(S_tracing_event_with_stack + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_INSTANT; + EXPECT_EQ_JSON(payload["category"], String, "B"); + EXPECT_TRUE(payload["stack"].isObject()); + + } else if (nameString == "auto tracing") { + switch (state) { + case S_tracing_auto_tracing_start: + state = State(S_tracing_auto_tracing_start + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_START; + EXPECT_EQ_JSON(payload["category"], String, "C"); + EXPECT_TRUE(payload["stack"].isNull()); + break; + case S_tracing_auto_tracing_end: + state = State(S_tracing_auto_tracing_end + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_END; + EXPECT_EQ_JSON(payload["category"], String, "C"); + ASSERT_TRUE(payload["stack"].isNull()); + break; + default: + EXPECT_TRUE(state == S_tracing_auto_tracing_start || + state == S_tracing_auto_tracing_end); + break; + } + + } else if (nameString == + "default-templated markers 2.0 with option") { + // TODO: Remove this when bug 1646714 lands. + EXPECT_EQ(state, S_Markers2DefaultWithOptions); + state = State(S_Markers2DefaultWithOptions + 1); + EXPECT_EQ(typeString, "NoPayloadUserData"); + EXPECT_FALSE(payload["stack"].isNull()); + + } else if (nameString == "FirstMarker") { + // Record start and end times, to compare with timestamps in + // following markers. + EXPECT_EQ(state, S_FirstMarker); + ts1Double = marker[START_TIME].asDouble(); + ts2Double = marker[END_TIME].asDouble(); + state = State(S_FirstMarker + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_EQ_JSON(payload["name"], String, "First Marker"); + + } else if (nameString == "Gtest custom marker") { + EXPECT_EQ(state, S_CustomMarker); + state = State(S_CustomMarker + 1); + EXPECT_EQ(typeString, "markers-gtest"); + EXPECT_EQ(payload.size(), 1u + 9u); + EXPECT_TRUE(payload["null"].isNull()); + EXPECT_EQ_JSON(payload["bool-false"], Bool, false); + EXPECT_EQ_JSON(payload["bool-true"], Bool, true); + EXPECT_EQ_JSON(payload["int"], Int64, 42); + EXPECT_EQ_JSON(payload["double"], Double, 43.0); + EXPECT_EQ_JSON(payload["text"], String, "gtest text"); + // Unique strings can be fetched from the string table. + ASSERT_TRUE(payload["unique text"].isUInt()); + auto textIndex = payload["unique text"].asUInt(); + GET_JSON(uniqueText, stringTable[textIndex], String); + ASSERT_TRUE(uniqueText.isString()); + ASSERT_EQ(uniqueText.asString(), "gtest unique text"); + // The duplicate unique text should have the exact same index. 
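+            // (UniqueStringProperty deduplicates through the per-thread string
+            // table, so writing the same string twice reuses a single index.)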
+ EXPECT_EQ_JSON(payload["unique text again"], UInt, textIndex); + EXPECT_EQ_JSON(payload["time"], Double, ts1Double); + + } else if (nameString == "Gtest special marker") { + EXPECT_EQ(state, S_SpecialMarker); + state = State(S_SpecialMarker + 1); + EXPECT_EQ(typeString, "markers-gtest-special"); + EXPECT_EQ(payload.size(), 1u) << "Only 'type' in the payload"; + + } else if (nameString == "Load 1: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_start); + state = State(S_NetworkMarkerPayload_start + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 1); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Hit"); + EXPECT_TRUE(payload["isPrivateBrowsing"].isNull()); + EXPECT_TRUE(payload["RedirectURI"].isNull()); + EXPECT_TRUE(payload["redirectType"].isNull()); + EXPECT_TRUE(payload["isHttpToHttpsRedirect"].isNull()); + EXPECT_TRUE(payload["redirectId"].isNull()); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Load 2: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_stop); + state = State(S_NetworkMarkerPayload_stop + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 2); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_TRUE(payload["isPrivateBrowsing"].isNull()); + EXPECT_TRUE(payload["RedirectURI"].isNull()); + EXPECT_TRUE(payload["redirectType"].isNull()); + EXPECT_TRUE(payload["isHttpToHttpsRedirect"].isNull()); + EXPECT_TRUE(payload["redirectId"].isNull()); + EXPECT_EQ_JSON(payload["contentType"], String, "text/html"); + + } else if (nameString == "Load 3: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_redirect_temporary); + state = State(S_NetworkMarkerPayload_redirect_temporary + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 3); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_TRUE(payload["isPrivateBrowsing"].isNull()); + EXPECT_EQ_JSON(payload["RedirectURI"], String, + "http://example.com/"); + EXPECT_EQ_JSON(payload["redirectType"], String, "Temporary"); + EXPECT_EQ_JSON(payload["isHttpToHttpsRedirect"], Bool, false); + EXPECT_EQ_JSON(payload["redirectId"], Int64, 103); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Load 4: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_redirect_permanent); + state = State(S_NetworkMarkerPayload_redirect_permanent + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + 
EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 4); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_TRUE(payload["isPrivateBrowsing"].isNull()); + EXPECT_EQ_JSON(payload["RedirectURI"], String, + "http://example.com/"); + EXPECT_EQ_JSON(payload["redirectType"], String, "Permanent"); + EXPECT_EQ_JSON(payload["isHttpToHttpsRedirect"], Bool, false); + EXPECT_EQ_JSON(payload["redirectId"], Int64, 104); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Load 5: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_redirect_internal); + state = State(S_NetworkMarkerPayload_redirect_internal + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 5); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_TRUE(payload["isPrivateBrowsing"].isNull()); + EXPECT_EQ_JSON(payload["RedirectURI"], String, + "http://example.com/"); + EXPECT_EQ_JSON(payload["redirectType"], String, "Internal"); + EXPECT_EQ_JSON(payload["isHttpToHttpsRedirect"], Bool, false); + EXPECT_EQ_JSON(payload["redirectId"], Int64, 105); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Load 6: http://mozilla.org/") { + EXPECT_EQ(state, + S_NetworkMarkerPayload_redirect_internal_sts); + state = + State(S_NetworkMarkerPayload_redirect_internal_sts + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 6); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_TRUE(payload["isPrivateBrowsing"].isNull()); + EXPECT_EQ_JSON(payload["RedirectURI"], String, + "http://example.com/"); + EXPECT_EQ_JSON(payload["redirectType"], String, "Internal"); + EXPECT_EQ_JSON(payload["isHttpToHttpsRedirect"], Bool, true); + EXPECT_EQ_JSON(payload["redirectId"], Int64, 106); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Load 7: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_private_browsing); + state = State(S_NetworkMarkerPayload_private_browsing + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 7); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_EQ_JSON(payload["isPrivateBrowsing"], Bool, true); + EXPECT_TRUE(payload["RedirectURI"].isNull()); + 
EXPECT_TRUE(payload["redirectType"].isNull()); + EXPECT_TRUE(payload["isHttpToHttpsRedirect"].isNull()); + EXPECT_TRUE(payload["redirectId"].isNull()); + EXPECT_TRUE(payload["contentType"].isNull()); + } else if (nameString == "Text in main thread with stack") { + EXPECT_EQ(state, S_TextWithStack); + state = State(S_TextWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_FALSE(payload["stack"].isNull()); + EXPECT_TIMING_INTERVAL_AT(ts1Double, ts2Double); + EXPECT_EQ_JSON(payload["name"], String, ""); + + } else if (nameString == "Text from main thread with stack") { + EXPECT_EQ(state, S_TextToMTWithStack); + state = State(S_TextToMTWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_FALSE(payload["stack"].isNull()); + EXPECT_EQ_JSON(payload["name"], String, ""); + + } else if (nameString == + "Text in registered thread with stack") { + ADD_FAILURE() + << "Unexpected 'Text in registered thread with stack'"; + + } else if (nameString == + "Text from registered thread with stack") { + EXPECT_EQ(state, S_RegThread_TextToMTWithStack); + state = State(S_RegThread_TextToMTWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_FALSE(payload["stack"].isNull()); + EXPECT_EQ_JSON(payload["name"], String, ""); + + } else if (nameString == + "Text in unregistered thread with stack") { + ADD_FAILURE() + << "Unexpected 'Text in unregistered thread with stack'"; + + } else if (nameString == + "Text from unregistered thread with stack") { + EXPECT_EQ(state, S_UnregThread_TextToMTWithStack); + state = State(S_UnregThread_TextToMTWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_TRUE(payload["stack"].isNull()); + EXPECT_EQ_JSON(payload["name"], String, ""); + } + } // marker with payload + } // for (marker : data) + } // markers.data + } // markers + } // thread0 + } // threads + // We should have read all expected markers. + EXPECT_EQ(state, S_LAST); + + { + GET_JSON(meta, root["meta"], Object); + + { + GET_JSON(markerSchema, meta["markerSchema"], Array); + + std::set<std::string> testedSchemaNames; + + for (const Json::Value& schema : markerSchema) { + GET_JSON(name, schema["name"], String); + const std::string nameString = name.asString(); + + GET_JSON(display, schema["display"], Array); + + GET_JSON(data, schema["data"], Array); + + EXPECT_TRUE( + testedSchemaNames + .insert(std::string(nameString.data(), nameString.size())) + .second) + << "Each schema name should be unique (inserted once in the set)"; + + if (nameString == "Text") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 1u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "name"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Details"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + } else if (nameString == "NoPayloadUserData") { + // TODO: Remove this when bug 1646714 lands. 
+ EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "FileIO") { + // These are defined in ProfilerIOInterposeObserver.cpp + + } else if (nameString == "tracing") { + EXPECT_EQ(display.size(), 3u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + EXPECT_EQ(display[2u].asString(), "timeline-overview"); + + ASSERT_EQ(data.size(), 1u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "category"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Type"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + } else if (nameString == "BHR-detected hang") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "MainThreadLongTask") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 1u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "category"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Type"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + } else if (nameString == "Log") { + EXPECT_EQ(display.size(), 1u); + EXPECT_EQ(display[0u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 2u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "module"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Module"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "name"); + EXPECT_EQ_JSON(data[1u]["label"], String, "Name"); + EXPECT_EQ_JSON(data[1u]["format"], String, "string"); + + } else if (nameString == "MediaSample") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 3u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "sampleStartTimeUs"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Sample start time"); + EXPECT_EQ_JSON(data[0u]["format"], String, "microseconds"); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "sampleEndTimeUs"); + EXPECT_EQ_JSON(data[1u]["label"], String, "Sample end time"); + EXPECT_EQ_JSON(data[1u]["format"], String, "microseconds"); + + ASSERT_TRUE(data[2u].isObject()); + EXPECT_EQ_JSON(data[2u]["key"], String, "queueLength"); + EXPECT_EQ_JSON(data[2u]["label"], String, "Queue length"); + EXPECT_EQ_JSON(data[2u]["format"], String, "integer"); + + } else if (nameString == "VideoFallingBehind") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 2u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "videoFrameStartTimeUs"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Video frame start time"); + EXPECT_EQ_JSON(data[0u]["format"], String, "microseconds"); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "mediaCurrentTimeUs"); + EXPECT_EQ_JSON(data[1u]["label"], String, "Media current time"); + EXPECT_EQ_JSON(data[1u]["format"], String, "microseconds"); + + } else if 
(nameString == "Budget") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "markers-gtest") { + EXPECT_EQ(display.size(), 7u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + EXPECT_EQ(display[2u].asString(), "timeline-overview"); + EXPECT_EQ(display[3u].asString(), "timeline-memory"); + EXPECT_EQ(display[4u].asString(), "timeline-ipc"); + EXPECT_EQ(display[5u].asString(), "timeline-fileio"); + EXPECT_EQ(display[6u].asString(), "stack-chart"); + + EXPECT_EQ_JSON(schema["chartLabel"], String, "chart label"); + EXPECT_EQ_JSON(schema["tooltipLabel"], String, "tooltip label"); + EXPECT_EQ_JSON(schema["tableLabel"], String, "table label"); + + ASSERT_EQ(data.size(), 14u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "key with url"); + EXPECT_TRUE(data[0u]["label"].isNull()); + EXPECT_EQ_JSON(data[0u]["format"], String, "url"); + EXPECT_TRUE(data[0u]["searchable"].isNull()); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "key with label filePath"); + EXPECT_EQ_JSON(data[1u]["label"], String, "label filePath"); + EXPECT_EQ_JSON(data[1u]["format"], String, "file-path"); + EXPECT_TRUE(data[1u]["searchable"].isNull()); + + ASSERT_TRUE(data[2u].isObject()); + EXPECT_EQ_JSON(data[2u]["key"], String, + "key with string not-searchable"); + EXPECT_TRUE(data[2u]["label"].isNull()); + EXPECT_EQ_JSON(data[2u]["format"], String, "string"); + EXPECT_EQ_JSON(data[2u]["searchable"], Bool, false); + + ASSERT_TRUE(data[3u].isObject()); + EXPECT_EQ_JSON(data[3u]["key"], String, + "key with label duration searchable"); + EXPECT_TRUE(data[3u]["label duration"].isNull()); + EXPECT_EQ_JSON(data[3u]["format"], String, "duration"); + EXPECT_EQ_JSON(data[3u]["searchable"], Bool, true); + + ASSERT_TRUE(data[4u].isObject()); + EXPECT_EQ_JSON(data[4u]["key"], String, "key with time"); + EXPECT_TRUE(data[4u]["label"].isNull()); + EXPECT_EQ_JSON(data[4u]["format"], String, "time"); + EXPECT_TRUE(data[4u]["searchable"].isNull()); + + ASSERT_TRUE(data[5u].isObject()); + EXPECT_EQ_JSON(data[5u]["key"], String, "key with seconds"); + EXPECT_TRUE(data[5u]["label"].isNull()); + EXPECT_EQ_JSON(data[5u]["format"], String, "seconds"); + EXPECT_TRUE(data[5u]["searchable"].isNull()); + + ASSERT_TRUE(data[6u].isObject()); + EXPECT_EQ_JSON(data[6u]["key"], String, "key with milliseconds"); + EXPECT_TRUE(data[6u]["label"].isNull()); + EXPECT_EQ_JSON(data[6u]["format"], String, "milliseconds"); + EXPECT_TRUE(data[6u]["searchable"].isNull()); + + ASSERT_TRUE(data[7u].isObject()); + EXPECT_EQ_JSON(data[7u]["key"], String, "key with microseconds"); + EXPECT_TRUE(data[7u]["label"].isNull()); + EXPECT_EQ_JSON(data[7u]["format"], String, "microseconds"); + EXPECT_TRUE(data[7u]["searchable"].isNull()); + + ASSERT_TRUE(data[8u].isObject()); + EXPECT_EQ_JSON(data[8u]["key"], String, "key with nanoseconds"); + EXPECT_TRUE(data[8u]["label"].isNull()); + EXPECT_EQ_JSON(data[8u]["format"], String, "nanoseconds"); + EXPECT_TRUE(data[8u]["searchable"].isNull()); + + ASSERT_TRUE(data[9u].isObject()); + EXPECT_EQ_JSON(data[9u]["key"], String, "key with bytes"); + EXPECT_TRUE(data[9u]["label"].isNull()); + EXPECT_EQ_JSON(data[9u]["format"], String, "bytes"); + EXPECT_TRUE(data[9u]["searchable"].isNull()); + + ASSERT_TRUE(data[10u].isObject()); + EXPECT_EQ_JSON(data[10u]["key"], 
String, "key with percentage"); + EXPECT_TRUE(data[10u]["label"].isNull()); + EXPECT_EQ_JSON(data[10u]["format"], String, "percentage"); + EXPECT_TRUE(data[10u]["searchable"].isNull()); + + ASSERT_TRUE(data[11u].isObject()); + EXPECT_EQ_JSON(data[11u]["key"], String, "key with integer"); + EXPECT_TRUE(data[11u]["label"].isNull()); + EXPECT_EQ_JSON(data[11u]["format"], String, "integer"); + EXPECT_TRUE(data[11u]["searchable"].isNull()); + + ASSERT_TRUE(data[12u].isObject()); + EXPECT_EQ_JSON(data[12u]["key"], String, "key with decimal"); + EXPECT_TRUE(data[12u]["label"].isNull()); + EXPECT_EQ_JSON(data[12u]["format"], String, "decimal"); + EXPECT_TRUE(data[12u]["searchable"].isNull()); + + ASSERT_TRUE(data[13u].isObject()); + EXPECT_EQ_JSON(data[13u]["label"], String, "static label"); + EXPECT_EQ_JSON(data[13u]["value"], String, "static value"); + + } else if (nameString == "markers-gtest-special") { + EXPECT_EQ(display.size(), 0u); + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "markers-gtest-unused") { + ADD_FAILURE() << "Schema for GtestUnusedMarker should not be here"; + + } else { + printf("FYI: Unknown marker schema '%s'\n", nameString.c_str()); + } + } + + // Check that we've got all expected schema. + EXPECT_TRUE(testedSchemaNames.find("Text") != testedSchemaNames.end()); + EXPECT_TRUE(testedSchemaNames.find("tracing") != + testedSchemaNames.end()); + EXPECT_TRUE(testedSchemaNames.find("MediaSample") != + testedSchemaNames.end()); + } // markerSchema + } // meta + }); + + Maybe<ProfilerBufferInfo> info = profiler_get_buffer_info(); + EXPECT_TRUE(info.isSome()); + printf("Profiler buffer range: %llu .. %llu (%llu bytes)\n", + static_cast<unsigned long long>(info->mRangeStart), + static_cast<unsigned long long>(info->mRangeEnd), + // sizeof(ProfileBufferEntry) == 9 + (static_cast<unsigned long long>(info->mRangeEnd) - + static_cast<unsigned long long>(info->mRangeStart)) * + 9); + printf("Stats: min(us) .. mean(us) .. max(us) [count]\n"); + printf("- Intervals: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mIntervalsUs.min, info->mIntervalsUs.sum / info->mIntervalsUs.n, + info->mIntervalsUs.max, info->mIntervalsUs.n); + printf("- Overheads: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mOverheadsUs.min, info->mOverheadsUs.sum / info->mOverheadsUs.n, + info->mOverheadsUs.max, info->mOverheadsUs.n); + printf(" - Locking: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mLockingsUs.min, info->mLockingsUs.sum / info->mLockingsUs.n, + info->mLockingsUs.max, info->mLockingsUs.n); + printf(" - Clearning: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mCleaningsUs.min, info->mCleaningsUs.sum / info->mCleaningsUs.n, + info->mCleaningsUs.max, info->mCleaningsUs.n); + printf(" - Counters: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mCountersUs.min, info->mCountersUs.sum / info->mCountersUs.n, + info->mCountersUs.max, info->mCountersUs.n); + printf(" - Threads: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mThreadsUs.min, info->mThreadsUs.sum / info->mThreadsUs.n, + info->mThreadsUs.max, info->mThreadsUs.n); + + profiler_stop(); + + // Try to add markers while the profiler is stopped. + PROFILER_MARKER_UNTYPED("marker after profiler_stop", OTHER); + + // Warning: this could be racy + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + // This last marker shouldn't get streamed. 
+ SpliceableChunkedJSONWriter w2{FailureLatchInfallibleSource::Singleton()}; + w2.Start(); + EXPECT_TRUE(::profiler_stream_json_for_this_process(w2).isOk()); + w2.End(); + EXPECT_FALSE(w2.Failed()); + UniquePtr<char[]> profile2 = w2.ChunkedWriteFunc().CopyData(); + ASSERT_TRUE(!!profile2.get()); + EXPECT_TRUE( + std::string_view(profile2.get()).find("marker after profiler_stop") == + std::string_view::npos); + + profiler_stop(); +} + +# define COUNTER_NAME "TestCounter" +# define COUNTER_DESCRIPTION "Test of counters in profiles" +# define COUNTER_NAME2 "Counter2" +# define COUNTER_DESCRIPTION2 "Second Test of counters in profiles" + +PROFILER_DEFINE_COUNT_TOTAL(TestCounter, COUNTER_NAME, COUNTER_DESCRIPTION); +PROFILER_DEFINE_COUNT_TOTAL(TestCounter2, COUNTER_NAME2, COUNTER_DESCRIPTION2); + +TEST(GeckoProfiler, Counters) +{ + uint32_t features = 0; + const char* filters[] = {"GeckoMain"}; + + // We will record some counter values, and check that they're present (and no + // other) when expected. + + struct NumberAndCount { + uint64_t mNumber; + int64_t mCount; + }; + + int64_t testCounters[] = {10, 7, -17}; + NumberAndCount expectedTestCounters[] = {{1u, 10}, {0u, 0}, {1u, 7}, + {0u, 0}, {0u, 0}, {1u, -17}, + {0u, 0}, {0u, 0}}; + constexpr size_t expectedTestCountersCount = + MOZ_ARRAY_LENGTH(expectedTestCounters); + + bool expectCounter2 = false; + int64_t testCounters2[] = {10}; + NumberAndCount expectedTestCounters2[] = {{1u, 10}, {0u, 0}}; + constexpr size_t expectedTestCounters2Count = + MOZ_ARRAY_LENGTH(expectedTestCounters2); + + auto checkCountersInJSON = [&](const Json::Value& aRoot) { + size_t nextExpectedTestCounter = 0u; + size_t nextExpectedTestCounter2 = 0u; + + GET_JSON(counters, aRoot["counters"], Array); + for (const Json::Value& counter : counters) { + ASSERT_TRUE(counter.isObject()); + GET_JSON_VALUE(name, counter["name"], String); + if (name == "TestCounter") { + EXPECT_EQ_JSON(counter["category"], String, COUNTER_NAME); + EXPECT_EQ_JSON(counter["description"], String, COUNTER_DESCRIPTION); + GET_JSON(sampleGroups, counter["sample_groups"], Array); + for (const Json::Value& sampleGroup : sampleGroups) { + ASSERT_TRUE(sampleGroup.isObject()); + EXPECT_EQ_JSON(sampleGroup["id"], UInt, 0u); + + GET_JSON(samples, sampleGroup["samples"], Object); + GET_JSON(samplesSchema, samples["schema"], Object); + EXPECT_GE(samplesSchema.size(), 3u); + GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt); + GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt); + GET_JSON(samplesData, samples["data"], Array); + for (const Json::Value& sample : samplesData) { + ASSERT_TRUE(sample.isArray()); + ASSERT_LT(nextExpectedTestCounter, expectedTestCountersCount); + EXPECT_EQ_JSON( + sample[samplesNumber], UInt64, + expectedTestCounters[nextExpectedTestCounter].mNumber); + EXPECT_EQ_JSON( + sample[samplesCount], Int64, + expectedTestCounters[nextExpectedTestCounter].mCount); + ++nextExpectedTestCounter; + } + } + } else if (name == "TestCounter2") { + EXPECT_TRUE(expectCounter2); + + EXPECT_EQ_JSON(counter["category"], String, COUNTER_NAME2); + EXPECT_EQ_JSON(counter["description"], String, COUNTER_DESCRIPTION2); + GET_JSON(sampleGroups, counter["sample_groups"], Array); + for (const Json::Value& sampleGroup : sampleGroups) { + ASSERT_TRUE(sampleGroup.isObject()); + EXPECT_EQ_JSON(sampleGroup["id"], UInt, 0u); + + GET_JSON(samples, sampleGroup["samples"], Object); + GET_JSON(samplesSchema, samples["schema"], Object); + EXPECT_GE(samplesSchema.size(), 3u); + 
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt); + GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt); + GET_JSON(samplesData, samples["data"], Array); + for (const Json::Value& sample : samplesData) { + ASSERT_TRUE(sample.isArray()); + ASSERT_LT(nextExpectedTestCounter2, expectedTestCounters2Count); + EXPECT_EQ_JSON( + sample[samplesNumber], UInt64, + expectedTestCounters2[nextExpectedTestCounter2].mNumber); + EXPECT_EQ_JSON( + sample[samplesCount], Int64, + expectedTestCounters2[nextExpectedTestCounter2].mCount); + ++nextExpectedTestCounter2; + } + } + } + } + + EXPECT_EQ(nextExpectedTestCounter, expectedTestCountersCount); + if (expectCounter2) { + EXPECT_EQ(nextExpectedTestCounter2, expectedTestCounters2Count); + } + }; + + // Inactive -> Active + profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + // Output all "TestCounter"s, with increasing delays (to test different + // number of counter samplings). + int samplingWaits = 2; + for (int64_t counter : testCounters) { + AUTO_PROFILER_COUNT_TOTAL(TestCounter, counter); + for (int i = 0; i < samplingWaits; ++i) { + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + ++samplingWaits; + } + + // Verify we got "TestCounter" in the output, but not "TestCounter2" yet. + UniquePtr<char[]> profile = profiler_get_profile(); + JSONOutputCheck(profile.get(), checkCountersInJSON); + + // Now introduce TestCounter2. + expectCounter2 = true; + for (int64_t counter2 : testCounters2) { + AUTO_PROFILER_COUNT_TOTAL(TestCounter2, counter2); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + + // Verify we got both "TestCounter" and "TestCounter2" in the output. + profile = profiler_get_profile(); + JSONOutputCheck(profile.get(), checkCountersInJSON); + + profiler_stop(); +} + +TEST(GeckoProfiler, Time) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + double t1 = profiler_time(); + double t2 = profiler_time(); + ASSERT_TRUE(t1 <= t2); + + // profiler_start() restarts the timer used by profiler_time(). + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + double t3 = profiler_time(); + double t4 = profiler_time(); + ASSERT_TRUE(t3 <= t4); + + profiler_stop(); + + double t5 = profiler_time(); + double t6 = profiler_time(); + ASSERT_TRUE(t4 <= t5 && t1 <= t6); +} + +TEST(GeckoProfiler, GetProfile) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!profiler_get_profile()); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + mozilla::Maybe<uint32_t> activeFeatures = profiler_features_if_active(); + ASSERT_TRUE(activeFeatures.isSome()); + // Not all platforms support stack-walking. + const bool hasStackWalk = ProfilerFeature::HasStackWalk(*activeFeatures); + + UniquePtr<char[]> profile = profiler_get_profile(); + JSONOutputCheck(profile.get(), [&](const Json::Value& aRoot) { + GET_JSON(meta, aRoot["meta"], Object); + { + GET_JSON(configuration, meta["configuration"], Object); + { + GET_JSON(features, configuration["features"], Array); + { + EXPECT_EQ(features.size(), (hasStackWalk ? 
1u : 0u)); + if (hasStackWalk) { + EXPECT_JSON_ARRAY_CONTAINS(features, String, "stackwalk"); + } + } + GET_JSON(threads, configuration["threads"], Array); + { + EXPECT_EQ(threads.size(), 1u); + EXPECT_JSON_ARRAY_CONTAINS(threads, String, "GeckoMain"); + } + } + } + }); + + profiler_stop(); + + ASSERT_TRUE(!profiler_get_profile()); +} + +TEST(GeckoProfiler, StreamJSONForThisProcess) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + SpliceableChunkedJSONWriter w{FailureLatchInfallibleSource::Singleton()}; + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Fallible()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Failed()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().GetFailure()); + MOZ_RELEASE_ASSERT(&w.ChunkedWriteFunc().SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + MOZ_RELEASE_ASSERT( + &std::as_const(w.ChunkedWriteFunc()).SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + MOZ_RELEASE_ASSERT(!w.Fallible()); + MOZ_RELEASE_ASSERT(!w.Failed()); + MOZ_RELEASE_ASSERT(!w.GetFailure()); + MOZ_RELEASE_ASSERT(&w.SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + MOZ_RELEASE_ASSERT(&std::as_const(w).SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + + ASSERT_TRUE(::profiler_stream_json_for_this_process(w).isErr()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Failed()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().GetFailure()); + MOZ_RELEASE_ASSERT(!w.Failed()); + MOZ_RELEASE_ASSERT(!w.GetFailure()); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + w.Start(); + ASSERT_TRUE(::profiler_stream_json_for_this_process(w).isOk()); + w.End(); + + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Failed()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().GetFailure()); + MOZ_RELEASE_ASSERT(!w.Failed()); + MOZ_RELEASE_ASSERT(!w.GetFailure()); + + UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData(); + + JSONOutputCheck(profile.get(), [](const Json::Value&) {}); + + profiler_stop(); + + ASSERT_TRUE(::profiler_stream_json_for_this_process(w).isErr()); +} + +// Internal version of profiler_stream_json_for_this_process, which allows being +// called from a non-main thread of the parent process, at the risk of getting +// an incomplete profile. +ProfilerResult<ProfileGenerationAdditionalInformation> +do_profiler_stream_json_for_this_process( + SpliceableJSONWriter& aWriter, double aSinceTime, bool aIsShuttingDown, + ProfilerCodeAddressService* aService, + mozilla::ProgressLogger aProgressLogger); + +TEST(GeckoProfiler, StreamJSONForThisProcessThreaded) +{ + // Same as the previous test, but calling some things on background threads. 
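+  // do_profiler_stream_json_for_this_process (declared above) is used for the
+  // off-main-thread streaming calls; as noted at its declaration, this risks
+  // an incomplete profile, which is acceptable for this test.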
+ nsCOMPtr<nsIThread> thread; + nsresult rv = NS_NewNamedThread("GeckoProfGTest", getter_AddRefs(thread)); + ASSERT_NS_SUCCEEDED(rv); + + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + SpliceableChunkedJSONWriter w{FailureLatchInfallibleSource::Singleton()}; + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Fallible()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Failed()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().GetFailure()); + MOZ_RELEASE_ASSERT(&w.ChunkedWriteFunc().SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + MOZ_RELEASE_ASSERT( + &std::as_const(w.ChunkedWriteFunc()).SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + MOZ_RELEASE_ASSERT(!w.Fallible()); + MOZ_RELEASE_ASSERT(!w.Failed()); + MOZ_RELEASE_ASSERT(!w.GetFailure()); + MOZ_RELEASE_ASSERT(&w.SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + MOZ_RELEASE_ASSERT(&std::as_const(w).SourceFailureLatch() == + &mozilla::FailureLatchInfallibleSource::Singleton()); + + ASSERT_TRUE(::profiler_stream_json_for_this_process(w).isErr()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Failed()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().GetFailure()); + MOZ_RELEASE_ASSERT(!w.Failed()); + MOZ_RELEASE_ASSERT(!w.GetFailure()); + + // Start the profiler on the main thread. + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + // Call profiler_stream_json_for_this_process on a background thread. + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_StreamJSONForThisProcessThreaded_Test::TestBody"_ns, + thread, + NS_NewRunnableFunction( + "GeckoProfiler_StreamJSONForThisProcessThreaded_Test::TestBody", + [&]() { + w.Start(); + ASSERT_TRUE(::do_profiler_stream_json_for_this_process( + w, /* double aSinceTime */ 0.0, + /* bool aIsShuttingDown */ false, + /* ProfilerCodeAddressService* aService */ nullptr, + mozilla::ProgressLogger{}) + .isOk()); + w.End(); + })); + + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().Failed()); + MOZ_RELEASE_ASSERT(!w.ChunkedWriteFunc().GetFailure()); + MOZ_RELEASE_ASSERT(!w.Failed()); + MOZ_RELEASE_ASSERT(!w.GetFailure()); + + UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData(); + + JSONOutputCheck(profile.get(), [](const Json::Value&) {}); + + // Stop the profiler and call profiler_stream_json_for_this_process on a + // background thread. + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_StreamJSONForThisProcessThreaded_Test::TestBody"_ns, + thread, + NS_NewRunnableFunction( + "GeckoProfiler_StreamJSONForThisProcessThreaded_Test::TestBody", + [&]() { + profiler_stop(); + ASSERT_TRUE(::do_profiler_stream_json_for_this_process( + w, /* double aSinceTime */ 0.0, + /* bool aIsShuttingDown */ false, + /* ProfilerCodeAddressService* aService */ nullptr, + mozilla::ProgressLogger{}) + .isErr()); + })); + thread->Shutdown(); + + // Call profiler_stream_json_for_this_process on the main thread. 
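+  // The profiler was stopped on the background thread just above, so
+  // streaming from the main thread is expected to fail as well.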
+ ASSERT_TRUE(::profiler_stream_json_for_this_process(w).isErr()); +} + +TEST(GeckoProfiler, ProfilingStack) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + AUTO_PROFILER_LABEL("A::B", OTHER); + + UniqueFreePtr<char> dynamic(strdup("dynamic")); + { + AUTO_PROFILER_LABEL_DYNAMIC_CSTR("A::C", JS, dynamic.get()); + AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING("A::C2", JS, + nsDependentCString(dynamic.get())); + AUTO_PROFILER_LABEL_DYNAMIC_LOSSY_NSSTRING( + "A::C3", JS, NS_ConvertUTF8toUTF16(dynamic.get())); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(profiler_get_backtrace()); + } + + AutoProfilerLabel label1("A", nullptr, JS::ProfilingCategoryPair::DOM); + AutoProfilerLabel label2("A", dynamic.get(), + JS::ProfilingCategoryPair::NETWORK); + ASSERT_TRUE(profiler_get_backtrace()); + + profiler_stop(); + + ASSERT_TRUE(!profiler_get_profile()); +} + +TEST(GeckoProfiler, Bug1355807) +{ + uint32_t features = ProfilerFeature::JS; + const char* manyThreadsFilter[] = {""}; + const char* fewThreadsFilter[] = {"GeckoMain"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + manyThreadsFilter, MOZ_ARRAY_LENGTH(manyThreadsFilter), 0); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + fewThreadsFilter, MOZ_ARRAY_LENGTH(fewThreadsFilter), 0); + + // In bug 1355807 this caused an assertion failure in StopJSSampling(). + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + fewThreadsFilter, MOZ_ARRAY_LENGTH(fewThreadsFilter), 0); + + profiler_stop(); +} + +class GTestStackCollector final : public ProfilerStackCollector { + public: + GTestStackCollector() : mSetIsMainThread(0), mFrames(0) {} + + virtual void SetIsMainThread() { mSetIsMainThread++; } + + virtual void CollectNativeLeafAddr(void* aAddr) { mFrames++; } + virtual void CollectJitReturnAddr(void* aAddr) { mFrames++; } + virtual void CollectWasmFrame(const char* aLabel) { mFrames++; } + virtual void CollectProfilingStackFrame( + const js::ProfilingStackFrame& aFrame) { + mFrames++; + } + + int mSetIsMainThread; + int mFrames; +}; + +void DoSuspendAndSample(ProfilerThreadId aTidToSample, + nsIThread* aSamplingThread) { + NS_DispatchAndSpinEventLoopUntilComplete( + "GeckoProfiler_SuspendAndSample_Test::TestBody"_ns, aSamplingThread, + NS_NewRunnableFunction( + "GeckoProfiler_SuspendAndSample_Test::TestBody", [&]() { + uint32_t features = ProfilerFeature::CPUUtilization; + GTestStackCollector collector; + profiler_suspend_and_sample_thread(aTidToSample, features, + collector, + /* sampleNative = */ true); + + ASSERT_TRUE(collector.mSetIsMainThread == + (aTidToSample == profiler_main_thread_id())); + ASSERT_TRUE(collector.mFrames > 0); + })); +} + +TEST(GeckoProfiler, SuspendAndSample) +{ + nsCOMPtr<nsIThread> thread; + nsresult rv = NS_NewNamedThread("GeckoProfGTest", getter_AddRefs(thread)); + ASSERT_NS_SUCCEEDED(rv); + + ProfilerThreadId tid = profiler_current_thread_id(); + + ASSERT_TRUE(!profiler_is_active()); + + // Suspend and sample while the profiler is inactive. 
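+  // (DoSuspendAndSample, defined above, checks that frames were collected and
+  // that SetIsMainThread is only called when sampling the main thread,
+  // whether or not the profiler is active.)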
+ DoSuspendAndSample(tid, thread); + + DoSuspendAndSample(ProfilerThreadId{}, thread); + + uint32_t features = ProfilerFeature::JS; + const char* filters[] = {"GeckoMain", "Compositor"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(profiler_is_active()); + + // Suspend and sample while the profiler is active. + DoSuspendAndSample(tid, thread); + + DoSuspendAndSample(ProfilerThreadId{}, thread); + + profiler_stop(); + + ASSERT_TRUE(!profiler_is_active()); +} + +TEST(GeckoProfiler, PostSamplingCallback) +{ + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters), + 0); + { + // Stack sampling -> This label should appear at least once. + AUTO_PROFILER_LABEL("PostSamplingCallback completed", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + UniquePtr<char[]> profileCompleted = profiler_get_profile(); + JSONOutputCheck(profileCompleted.get(), [](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + { + GET_JSON(thread0, threads[0], Object); + { + EXPECT_JSON_ARRAY_CONTAINS(thread0["stringTable"], String, + "PostSamplingCallback completed"); + } + } + }); + + profiler_pause(); + { + // Paused -> This label should not appear. + AUTO_PROFILER_LABEL("PostSamplingCallback paused", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingPaused); + } + UniquePtr<char[]> profilePaused = profiler_get_profile(); + JSONOutputCheck(profilePaused.get(), [](const Json::Value& aRoot) {}); + // This string shouldn't appear *anywhere* in the profile. + ASSERT_FALSE(strstr(profilePaused.get(), "PostSamplingCallback paused")); + + profiler_resume(); + { + // Stack sampling -> This label should appear at least once. + AUTO_PROFILER_LABEL("PostSamplingCallback resumed", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + UniquePtr<char[]> profileResumed = profiler_get_profile(); + JSONOutputCheck(profileResumed.get(), [](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + { + GET_JSON(thread0, threads[0], Object); + { + EXPECT_JSON_ARRAY_CONTAINS(thread0["stringTable"], String, + "PostSamplingCallback resumed"); + } + } + }); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk | ProfilerFeature::NoStackSampling, + filters, MOZ_ARRAY_LENGTH(filters), 0); + { + // No stack sampling -> This label should not appear. + AUTO_PROFILER_LABEL("PostSamplingCallback completed (no stacks)", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::NoStackSamplingCompleted); + } + UniquePtr<char[]> profileNoStacks = profiler_get_profile(); + JSONOutputCheck(profileNoStacks.get(), [](const Json::Value& aRoot) {}); + // This string shouldn't appear *anywhere* in the profile. + ASSERT_FALSE(strstr(profileNoStacks.get(), + "PostSamplingCallback completed (no stacks)")); + + // Note: There is no non-racy way to test for SamplingState::JustStopped, as + // it would require coordination between `profiler_stop()` and another thread + // doing `profiler_callback_after_sampling()` at just the right moment. 
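+  // (The profiler_callback_after_sampling call below, made after
+  // profiler_stop(), must be rejected, mirroring the check at the start of
+  // this test.)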
+ + profiler_stop(); + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); +} + +TEST(GeckoProfiler, ProfilingStateCallback) +{ + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!profiler_is_active()); + + struct ProfilingStateAndId { + ProfilingState mProfilingState; + int mId; + }; + DataMutex<Vector<ProfilingStateAndId>> states{"Profiling states"}; + auto CreateCallback = [&states](int id) { + return [id, &states](ProfilingState aProfilingState) { + auto lockedStates = states.Lock(); + ASSERT_TRUE( + lockedStates->append(ProfilingStateAndId{aProfilingState, id})); + }; + }; + auto CheckStatesIsEmpty = [&states]() { + auto lockedStates = states.Lock(); + EXPECT_TRUE(lockedStates->empty()); + }; + auto CheckStatesOnlyContains = [&states](ProfilingState aProfilingState, + int aId) { + auto lockedStates = states.Lock(); + EXPECT_EQ(lockedStates->length(), 1u); + if (lockedStates->length() >= 1u) { + EXPECT_EQ((*lockedStates)[0].mProfilingState, aProfilingState); + EXPECT_EQ((*lockedStates)[0].mId, aId); + } + lockedStates->clear(); + }; + + profiler_add_state_change_callback(AllProfilingStates(), CreateCallback(1), + 1); + // This is in case of error, and it also exercises the (allowed) removal of + // unknown callback ids. + auto cleanup1 = mozilla::MakeScopeExit( + []() { profiler_remove_state_change_callback(1); }); + CheckStatesIsEmpty(); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters), + 0); + + CheckStatesOnlyContains(ProfilingState::Started, 1); + + profiler_add_state_change_callback(AllProfilingStates(), CreateCallback(2), + 2); + // This is in case of error, and it also exercises the (allowed) removal of + // unknown callback ids. + auto cleanup2 = mozilla::MakeScopeExit( + []() { profiler_remove_state_change_callback(2); }); + CheckStatesOnlyContains(ProfilingState::AlreadyActive, 2); + + profiler_remove_state_change_callback(2); + CheckStatesOnlyContains(ProfilingState::RemovingCallback, 2); + // Note: The actual removal is effectively tested below, by not seeing any + // more invocations of the 2nd callback. + + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + UniquePtr<char[]> profileCompleted = profiler_get_profile(); + CheckStatesOnlyContains(ProfilingState::GeneratingProfile, 1); + JSONOutputCheck(profileCompleted.get(), [](const Json::Value& aRoot) {}); + + profiler_pause(); + CheckStatesOnlyContains(ProfilingState::Pausing, 1); + UniquePtr<char[]> profilePaused = profiler_get_profile(); + CheckStatesOnlyContains(ProfilingState::GeneratingProfile, 1); + JSONOutputCheck(profilePaused.get(), [](const Json::Value& aRoot) {}); + + profiler_resume(); + CheckStatesOnlyContains(ProfilingState::Resumed, 1); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + UniquePtr<char[]> profileResumed = profiler_get_profile(); + CheckStatesOnlyContains(ProfilingState::GeneratingProfile, 1); + JSONOutputCheck(profileResumed.get(), [](const Json::Value& aRoot) {}); + + // This effectively stops the profiler before restarting it, but + // ProfilingState::Stopping is not notified. See `profiler_start` for details. 
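+  // (Hence only ProfilingState::Started is expected below, not
+  // ProfilingState::Stopping.)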
+ profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk | ProfilerFeature::NoStackSampling, + filters, MOZ_ARRAY_LENGTH(filters), 0); + CheckStatesOnlyContains(ProfilingState::Started, 1); + ASSERT_EQ(WaitForSamplingState(), SamplingState::NoStackSamplingCompleted); + UniquePtr<char[]> profileNoStacks = profiler_get_profile(); + CheckStatesOnlyContains(ProfilingState::GeneratingProfile, 1); + JSONOutputCheck(profileNoStacks.get(), [](const Json::Value& aRoot) {}); + + profiler_stop(); + CheckStatesOnlyContains(ProfilingState::Stopping, 1); + ASSERT_TRUE(!profiler_is_active()); + + profiler_remove_state_change_callback(1); + CheckStatesOnlyContains(ProfilingState::RemovingCallback, 1); + + // Note: ProfilingState::ShuttingDown cannot be tested here, and the profiler + // can only be shut down once per process. +} + +TEST(GeckoProfiler, BaseProfilerHandOff) +{ + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!baseprofiler::profiler_is_active()); + ASSERT_TRUE(!profiler_is_active()); + + BASE_PROFILER_MARKER_UNTYPED("Base marker before base profiler", OTHER, {}); + PROFILER_MARKER_UNTYPED("Gecko marker before base profiler", OTHER, {}); + + // Start the Base Profiler. + baseprofiler::profiler_start( + PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters)); + + ASSERT_TRUE(baseprofiler::profiler_is_active()); + ASSERT_TRUE(!profiler_is_active()); + + // Add at least a marker, which should go straight into the buffer. + Maybe<baseprofiler::ProfilerBufferInfo> info0 = + baseprofiler::profiler_get_buffer_info(); + BASE_PROFILER_MARKER_UNTYPED("Base marker during base profiler", OTHER, {}); + Maybe<baseprofiler::ProfilerBufferInfo> info1 = + baseprofiler::profiler_get_buffer_info(); + ASSERT_GT(info1->mRangeEnd, info0->mRangeEnd); + + PROFILER_MARKER_UNTYPED("Gecko marker during base profiler", OTHER, {}); + + // Start the Gecko Profiler, which should grab the Base Profiler profile and + // stop it. + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters), + 0); + + ASSERT_TRUE(!baseprofiler::profiler_is_active()); + ASSERT_TRUE(profiler_is_active()); + + BASE_PROFILER_MARKER_UNTYPED("Base marker during gecko profiler", OTHER, {}); + PROFILER_MARKER_UNTYPED("Gecko marker during gecko profiler", OTHER, {}); + + // Write some Gecko Profiler samples. + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + + // Check that the Gecko Profiler profile contains at least the Base Profiler + // main thread samples. 
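+  // The string-table expectations below encode the hand-off: Base Profiler
+  // markers recorded while only it was running survive into the Gecko
+  // profile, while markers from before any profiler started, Gecko markers
+  // from before the hand-off, and anything recorded after profiler_stop() do
+  // not.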
+ UniquePtr<char[]> profile = profiler_get_profile(); + + profiler_stop(); + ASSERT_TRUE(!profiler_is_active()); + + BASE_PROFILER_MARKER_UNTYPED("Base marker after gecko profiler", OTHER, {}); + PROFILER_MARKER_UNTYPED("Gecko marker after gecko profiler", OTHER, {}); + + JSONOutputCheck(profile.get(), [](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + { + bool found = false; + for (const Json::Value& thread : threads) { + ASSERT_TRUE(thread.isObject()); + GET_JSON(name, thread["name"], String); + if (name.asString() == "GeckoMain") { + found = true; + EXPECT_JSON_ARRAY_EXCLUDES(thread["stringTable"], String, + "Base marker before base profiler"); + EXPECT_JSON_ARRAY_EXCLUDES(thread["stringTable"], String, + "Gecko marker before base profiler"); + EXPECT_JSON_ARRAY_CONTAINS(thread["stringTable"], String, + "Base marker during base profiler"); + EXPECT_JSON_ARRAY_EXCLUDES(thread["stringTable"], String, + "Gecko marker during base profiler"); + EXPECT_JSON_ARRAY_CONTAINS(thread["stringTable"], String, + "Base marker during gecko profiler"); + EXPECT_JSON_ARRAY_CONTAINS(thread["stringTable"], String, + "Gecko marker during gecko profiler"); + EXPECT_JSON_ARRAY_EXCLUDES(thread["stringTable"], String, + "Base marker after gecko profiler"); + EXPECT_JSON_ARRAY_EXCLUDES(thread["stringTable"], String, + "Gecko marker after gecko profiler"); + break; + } + } + EXPECT_TRUE(found); + } + }); +} + +static std::string_view GetFeatureName(uint32_t feature) { + switch (feature) { +# define FEATURE_NAME(n_, str_, Name_, desc_) \ + case ProfilerFeature::Name_: \ + return str_; + + PROFILER_FOR_EACH_FEATURE(FEATURE_NAME) + +# undef FEATURE_NAME + + default: + return "?"; + } +} + +TEST(GeckoProfiler, FeatureCombinations) +{ + // Bug 1845606 + #ifdef XP_WIN + if (!IsWin8OrLater()) { + return; + } + #endif + + const char* filters[] = {"*"}; + + // List of features to test. Every combination of up to 3 of them will be + // tested, so be careful not to add too many to keep the test run at a + // reasonable time. + uint32_t featureList[] = {ProfilerFeature::JS, + ProfilerFeature::Screenshots, + ProfilerFeature::StackWalk, + ProfilerFeature::NoStackSampling, + ProfilerFeature::NativeAllocations, + ProfilerFeature::CPUUtilization, + ProfilerFeature::CPUAllThreads, + ProfilerFeature::SamplingAllThreads, + ProfilerFeature::MarkersAllThreads, + ProfilerFeature::UnregisteredThreads}; + constexpr uint32_t featureCount = uint32_t(MOZ_ARRAY_LENGTH(featureList)); + + auto testFeatures = [&](uint32_t features, + const std::string& featuresString) { + SCOPED_TRACE(featuresString.c_str()); + + ASSERT_TRUE(!profiler_is_active()); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(profiler_is_active()); + + // Write some Gecko Profiler samples. + EXPECT_EQ(WaitForSamplingState(), + (((features & ProfilerFeature::NoStackSampling) != 0) && + ((features & (ProfilerFeature::CPUUtilization | + ProfilerFeature::CPUAllThreads)) == 0)) + ? SamplingState::NoStackSamplingCompleted + : SamplingState::SamplingCompleted); + + // Check that the profile looks valid. Note that we don't test feature- + // specific changes. 
+    UniquePtr<char[]> profile = profiler_get_profile();
+    JSONOutputCheck(profile.get(), [](const Json::Value& aRoot) {});
+
+    profiler_stop();
+    ASSERT_TRUE(!profiler_is_active());
+  };
+
+  testFeatures(0, "Features: (none)");
+
+  for (uint32_t f1 = 0u; f1 < featureCount; ++f1) {
+    const uint32_t features1 = featureList[f1];
+    std::string features1String = "Features: ";
+    features1String += GetFeatureName(featureList[f1]);
+
+    testFeatures(features1, features1String);
+
+    for (uint32_t f2 = f1 + 1u; f2 < featureCount; ++f2) {
+      const uint32_t features12 = features1 | featureList[f2];
+      std::string features12String = features1String + " ";
+      features12String += GetFeatureName(featureList[f2]);
+
+      testFeatures(features12, features12String);
+
+      for (uint32_t f3 = f2 + 1u; f3 < featureCount; ++f3) {
+        const uint32_t features123 = features12 | featureList[f3];
+        std::string features123String = features12String + " ";
+        features123String += GetFeatureName(featureList[f3]);
+
+        testFeatures(features123, features123String);
+      }
+    }
+  }
+}
+
+static void CountCPUDeltas(const Json::Value& aThread, size_t& aOutSamplings,
+                           uint64_t& aOutCPUDeltaSum) {
+  GET_JSON(samples, aThread["samples"], Object);
+  {
+    Json::ArrayIndex threadCPUDeltaIndex = 0;
+    GET_JSON(schema, samples["schema"], Object);
+    {
+      GET_JSON(jsonThreadCPUDeltaIndex, schema["threadCPUDelta"], UInt);
+      threadCPUDeltaIndex = jsonThreadCPUDeltaIndex.asUInt();
+    }
+
+    aOutSamplings = 0;
+    aOutCPUDeltaSum = 0;
+    GET_JSON(data, samples["data"], Array);
+    aOutSamplings = data.size();
+    for (const Json::Value& sample : data) {
+      ASSERT_TRUE(sample.isArray());
+      if (sample.isValidIndex(threadCPUDeltaIndex)) {
+        if (!sample[threadCPUDeltaIndex].isNull()) {
+          GET_JSON(cpuDelta, sample[threadCPUDeltaIndex], UInt64);
+          aOutCPUDeltaSum += uint64_t(cpuDelta.asUInt64());
+        }
+      }
+    }
+  }
+}
+
+TEST(GeckoProfiler, CPUUsage)
+{
+  profiler_init_main_thread_id();
+  ASSERT_TRUE(profiler_is_main_thread())
+      << "This test assumes it runs on the main thread";
+
+  const char* filters[] = {"GeckoMain", "Idle test", "Busy test"};
+
+  enum class TestThreadsState {
+    // Initial state, while constructing and starting the test threads.
+    STARTING,
+    // Set in turn by each test thread just before running its main loop.
+    RUNNING1,
+    RUNNING2,
+    // Set by the main thread when it wants the test threads to stop.
+    STOPPING
+  };
+  Atomic<TestThreadsState> testThreadsState{TestThreadsState::STARTING};
+
+  std::thread idle([&]() {
+    AUTO_PROFILER_REGISTER_THREAD("Idle test");
+    // Add a label to ensure that we have a non-empty stack, even if native
+    // stack-walking is not available.
+    AUTO_PROFILER_LABEL("Idle test", PROFILER);
+    ASSERT_TRUE(testThreadsState.compareExchange(TestThreadsState::STARTING,
+                                                 TestThreadsState::RUNNING1) ||
+                testThreadsState.compareExchange(TestThreadsState::RUNNING1,
+                                                 TestThreadsState::RUNNING2));
+
+    while (testThreadsState != TestThreadsState::STOPPING) {
+      // Sleep for multiple profiler intervals, so the profiler should have
+      // samples with zero CPU utilization.
+      PR_Sleep(PR_MillisecondsToInterval(PROFILER_DEFAULT_INTERVAL * 10));
+    }
+  });
+
+  std::thread busy([&]() {
+    AUTO_PROFILER_REGISTER_THREAD("Busy test");
+    // Add a label to ensure that we have a non-empty stack, even if native
+    // stack-walking is not available.
+ AUTO_PROFILER_LABEL("Busy test", PROFILER); + ASSERT_TRUE(testThreadsState.compareExchange(TestThreadsState::STARTING, + TestThreadsState::RUNNING1) || + testThreadsState.compareExchange(TestThreadsState::RUNNING1, + TestThreadsState::RUNNING2)); + + while (testThreadsState != TestThreadsState::STOPPING) { + // Stay busy! + } + }); + + // Wait for idle thread to start running its main loop. + while (testThreadsState != TestThreadsState::RUNNING2) { + PR_Sleep(PR_MillisecondsToInterval(1)); + } + + // We want to ensure that CPU usage numbers are present whether or not we are + // collecting stack samples. + static constexpr bool scTestsWithOrWithoutStackSampling[] = {false, true}; + for (const bool testWithNoStackSampling : scTestsWithOrWithoutStackSampling) { + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); + + profiler_start( + PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk | ProfilerFeature::CPUUtilization | + (testWithNoStackSampling ? ProfilerFeature::NoStackSampling : 0), + filters, MOZ_ARRAY_LENGTH(filters), 0); + // Grab a few samples, each with a different label on the stack. +# define SAMPLE_LABEL_PREFIX "CPUUsage sample label " + static constexpr const char* scSampleLabels[] = { + SAMPLE_LABEL_PREFIX "0", SAMPLE_LABEL_PREFIX "1", + SAMPLE_LABEL_PREFIX "2", SAMPLE_LABEL_PREFIX "3", + SAMPLE_LABEL_PREFIX "4", SAMPLE_LABEL_PREFIX "5", + SAMPLE_LABEL_PREFIX "6", SAMPLE_LABEL_PREFIX "7", + SAMPLE_LABEL_PREFIX "8", SAMPLE_LABEL_PREFIX "9"}; + static constexpr size_t scSampleLabelCount = + (sizeof(scSampleLabels) / sizeof(scSampleLabels[0])); + // We'll do two samplings for each label. + static constexpr size_t scMinSamplings = scSampleLabelCount * 2; + + for (const char* sampleLabel : scSampleLabels) { + AUTO_PROFILER_LABEL(sampleLabel, OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + // Note: There could have been a delay before this label above, where the + // profiler could have sampled the stack and missed the label. By forcing + // another sampling now, the label is guaranteed to be present. + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + + UniquePtr<char[]> profile = profiler_get_profile(); + + if (testWithNoStackSampling) { + // If we are testing nostacksampling, we shouldn't find this label prefix + // in the profile. + EXPECT_FALSE(strstr(profile.get(), SAMPLE_LABEL_PREFIX)); + } else { + // In normal sampling mode, we should find all labels. + for (const char* sampleLabel : scSampleLabels) { + EXPECT_TRUE(strstr(profile.get(), sampleLabel)); + } + } + + JSONOutputCheck(profile.get(), [testWithNoStackSampling]( + const Json::Value& aRoot) { + // Check that the "cpu" feature is present. + GET_JSON(meta, aRoot["meta"], Object); + { + GET_JSON(configuration, meta["configuration"], Object); + { + GET_JSON(features, configuration["features"], Array); + { EXPECT_JSON_ARRAY_CONTAINS(features, String, "cpu"); } + } + } + + { + GET_JSON(sampleUnits, meta["sampleUnits"], Object); + { + EXPECT_EQ_JSON(sampleUnits["time"], String, "ms"); + EXPECT_EQ_JSON(sampleUnits["eventDelay"], String, "ms"); +# if defined(GP_OS_windows) || defined(GP_OS_darwin) || \ + defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd) + // Note: The exact string is not important here. 
        EXPECT_TRUE(sampleUnits["threadCPUDelta"].isString())
+            << "There should be a sampleUnits.threadCPUDelta on this "
+               "platform";
+# else
+        EXPECT_FALSE(sampleUnits.isMember("threadCPUDelta"))
+            << "Unexpected sampleUnits.threadCPUDelta on this platform";
+# endif
+      }
+    }
+
+    bool foundMain = false;
+    bool foundIdle = false;
+    uint64_t idleThreadCPUDeltaSum = 0u;
+    bool foundBusy = false;
+    uint64_t busyThreadCPUDeltaSum = 0u;
+
+    // Check that the sample schema contains "threadCPUDelta".
+    GET_JSON(threads, aRoot["threads"], Array);
+    for (const Json::Value& thread : threads) {
+      ASSERT_TRUE(thread.isObject());
+      GET_JSON(name, thread["name"], String);
+      if (name.asString() == "GeckoMain") {
+        foundMain = true;
+        GET_JSON(samples, thread["samples"], Object);
+        {
+          Json::ArrayIndex stackIndex = 0;
+          Json::ArrayIndex threadCPUDeltaIndex = 0;
+          GET_JSON(schema, samples["schema"], Object);
+          {
+            GET_JSON(jsonStackIndex, schema["stack"], UInt);
+            stackIndex = jsonStackIndex.asUInt();
+            GET_JSON(jsonThreadCPUDeltaIndex, schema["threadCPUDelta"], UInt);
+            threadCPUDeltaIndex = jsonThreadCPUDeltaIndex.asUInt();
+          }
+
+          std::set<uint64_t> stackLeaves;  // To count distinct leaves.
+          unsigned threadCPUDeltaCount = 0;
+          GET_JSON(data, samples["data"], Array);
+          if (testWithNoStackSampling) {
+            // When not sampling stacks, the first sampling loop will have no
+            // running times, so it won't output anything.
+            EXPECT_GE(data.size(), scMinSamplings - 1);
+          } else {
+            EXPECT_GE(data.size(), scMinSamplings);
+          }
+          for (const Json::Value& sample : data) {
+            ASSERT_TRUE(sample.isArray());
+            if (sample.isValidIndex(stackIndex)) {
+              if (!sample[stackIndex].isNull()) {
+                GET_JSON(stack, sample[stackIndex], UInt64);
+                stackLeaves.insert(stack.asUInt64());
+              }
+            }
+            if (sample.isValidIndex(threadCPUDeltaIndex)) {
+              if (!sample[threadCPUDeltaIndex].isNull()) {
+                EXPECT_TRUE(sample[threadCPUDeltaIndex].isUInt64());
+                ++threadCPUDeltaCount;
+              }
+            }
+          }
+
+          if (testWithNoStackSampling) {
+            // In nostacksampling mode, there should only be one kind of stack
+            // leaf (the root).
+            EXPECT_EQ(stackLeaves.size(), 1u);
+          } else {
+            // In normal sampling mode, there should be at least one kind of
+            // stack leaf for each distinct label.
+            EXPECT_GE(stackLeaves.size(), scSampleLabelCount);
+          }
+
+# if defined(GP_OS_windows) || defined(GP_OS_darwin) || \
+     defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd)
+          EXPECT_GE(threadCPUDeltaCount, data.size() - 1u)
+              << "There should be 'threadCPUDelta' values in all but 1 "
+                 "sample";
+# else
+          // All "threadCPUDelta" data should be absent or null on unsupported
+          // platforms.
+          EXPECT_EQ(threadCPUDeltaCount, 0u);
+# endif
+        }
+      } else if (name.asString() == "Idle test") {
+        foundIdle = true;
+        size_t samplings;
+        CountCPUDeltas(thread, samplings, idleThreadCPUDeltaSum);
+        if (testWithNoStackSampling) {
+          // When not sampling stacks, the first sampling loop will have no
+          // running times, so it won't output anything.
+          EXPECT_GE(samplings, scMinSamplings - 1);
+        } else {
+          EXPECT_GE(samplings, scMinSamplings);
+        }
+# if !(defined(GP_OS_windows) || defined(GP_OS_darwin) || \
+       defined(GP_OS_linux) || defined(GP_OS_android) || \
+       defined(GP_OS_freebsd))
+        // All "threadCPUDelta" data should be absent or null on unsupported
+        // platforms.
+ EXPECT_EQ(idleThreadCPUDeltaSum, 0u); +# endif + } else if (name.asString() == "Busy test") { + foundBusy = true; + size_t samplings; + CountCPUDeltas(thread, samplings, busyThreadCPUDeltaSum); + if (testWithNoStackSampling) { + // When not sampling stacks, the first sampling loop will have no + // running times, so it won't output anything. + EXPECT_GE(samplings, scMinSamplings - 1); + } else { + EXPECT_GE(samplings, scMinSamplings); + } +# if !(defined(GP_OS_windows) || defined(GP_OS_darwin) || \ + defined(GP_OS_linux) || defined(GP_OS_android) || \ + defined(GP_OS_freebsd)) + // All "threadCPUDelta" data should be absent or null on unsupported + // platforms. + EXPECT_EQ(busyThreadCPUDeltaSum, 0u); +# endif + } + } + + EXPECT_TRUE(foundMain); + EXPECT_TRUE(foundIdle); + EXPECT_TRUE(foundBusy); + EXPECT_LE(idleThreadCPUDeltaSum, busyThreadCPUDeltaSum); + }); + + // Note: There is no non-racy way to test for SamplingState::JustStopped, as + // it would require coordination between `profiler_stop()` and another + // thread doing `profiler_callback_after_sampling()` at just the right + // moment. + + profiler_stop(); + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); + } + + testThreadsState = TestThreadsState::STOPPING; + busy.join(); + idle.join(); +} + +TEST(GeckoProfiler, AllThreads) +{ + // Bug 1845606 + #ifdef XP_WIN + if (!IsWin8OrLater()) { + return; + } + #endif + + profiler_init_main_thread_id(); + ASSERT_TRUE(profiler_is_main_thread()) + << "This test assumes it runs on the main thread"; + + ASSERT_EQ(static_cast<uint32_t>(ThreadProfilingFeatures::Any), 1u + 2u + 4u) + << "This test assumes that there are 3 binary choices 1+2+4; " + "Is this test up to date?"; + + for (uint32_t threadFeaturesBinary = 0u; + threadFeaturesBinary <= + static_cast<uint32_t>(ThreadProfilingFeatures::Any); + ++threadFeaturesBinary) { + ThreadProfilingFeatures threadFeatures = + static_cast<ThreadProfilingFeatures>(threadFeaturesBinary); + const bool threadCPU = DoFeaturesIntersect( + threadFeatures, ThreadProfilingFeatures::CPUUtilization); + const bool threadSampling = + DoFeaturesIntersect(threadFeatures, ThreadProfilingFeatures::Sampling); + const bool threadMarkers = + DoFeaturesIntersect(threadFeatures, ThreadProfilingFeatures::Markers); + + ASSERT_TRUE(!profiler_is_active()); + + uint32_t features = ProfilerFeature::StackWalk; + std::string featuresString = "Features: StackWalk Threads"; + if (threadCPU) { + features |= ProfilerFeature::CPUAllThreads; + featuresString += " CPUAllThreads"; + } + if (threadSampling) { + features |= ProfilerFeature::SamplingAllThreads; + featuresString += " SamplingAllThreads"; + } + if (threadMarkers) { + features |= ProfilerFeature::MarkersAllThreads; + featuresString += " MarkersAllThreads"; + } + + SCOPED_TRACE(featuresString.c_str()); + + const char* filters[] = {"GeckoMain", "Selected"}; + + EXPECT_FALSE(profiler_thread_is_being_profiled( + ThreadProfilingFeatures::CPUUtilization)); + EXPECT_FALSE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Sampling)); + EXPECT_FALSE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Markers)); + EXPECT_FALSE(profiler_thread_is_being_profiled_for_markers()); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + EXPECT_TRUE(profiler_thread_is_being_profiled( + ThreadProfilingFeatures::CPUUtilization)); + EXPECT_TRUE( + 
profiler_thread_is_being_profiled(ThreadProfilingFeatures::Sampling)); + EXPECT_TRUE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Markers)); + EXPECT_TRUE(profiler_thread_is_being_profiled_for_markers()); + + // This will signal all threads to stop spinning. + Atomic<bool> stopThreads{false}; + + Atomic<int> selectedThreadSpins{0}; + std::thread selectedThread([&]() { + AUTO_PROFILER_REGISTER_THREAD("Selected test thread"); + // Add a label to ensure that we have a non-empty stack, even if native + // stack-walking is not available. + AUTO_PROFILER_LABEL("Selected test thread", PROFILER); + EXPECT_TRUE(profiler_thread_is_being_profiled( + ThreadProfilingFeatures::CPUUtilization)); + EXPECT_TRUE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Sampling)); + EXPECT_TRUE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Markers)); + EXPECT_TRUE(profiler_thread_is_being_profiled_for_markers()); + while (!stopThreads) { + PROFILER_MARKER_UNTYPED("Spinning Selected!", PROFILER); + ++selectedThreadSpins; + PR_Sleep(PR_MillisecondsToInterval(1)); + } + }); + + Atomic<int> unselectedThreadSpins{0}; + std::thread unselectedThread([&]() { + AUTO_PROFILER_REGISTER_THREAD("Registered test thread"); + // Add a label to ensure that we have a non-empty stack, even if native + // stack-walking is not available. + AUTO_PROFILER_LABEL("Registered test thread", PROFILER); + // This thread is *not* selected for full profiling, but it may still be + // profiled depending on the -allthreads features. + EXPECT_EQ(profiler_thread_is_being_profiled( + ThreadProfilingFeatures::CPUUtilization), + threadCPU); + EXPECT_EQ( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Sampling), + threadSampling); + EXPECT_EQ( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Markers), + threadMarkers); + EXPECT_EQ(profiler_thread_is_being_profiled_for_markers(), threadMarkers); + while (!stopThreads) { + PROFILER_MARKER_UNTYPED("Spinning Registered!", PROFILER); + ++unselectedThreadSpins; + PR_Sleep(PR_MillisecondsToInterval(1)); + } + }); + + Atomic<int> unregisteredThreadSpins{0}; + std::thread unregisteredThread([&]() { + // No `AUTO_PROFILER_REGISTER_THREAD` here. + EXPECT_FALSE(profiler_thread_is_being_profiled( + ThreadProfilingFeatures::CPUUtilization)); + EXPECT_FALSE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Sampling)); + EXPECT_FALSE( + profiler_thread_is_being_profiled(ThreadProfilingFeatures::Markers)); + EXPECT_FALSE(profiler_thread_is_being_profiled_for_markers()); + while (!stopThreads) { + PROFILER_MARKER_UNTYPED("Spinning Unregistered!", PROFILER); + ++unregisteredThreadSpins; + PR_Sleep(PR_MillisecondsToInterval(1)); + } + }); + + // Wait for all threads to have started at least one spin. + while (selectedThreadSpins == 0 || unselectedThreadSpins == 0 || + unregisteredThreadSpins == 0) { + PR_Sleep(PR_MillisecondsToInterval(1)); + } + + // Wait until the sampler has done at least one loop. + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + + // Restart the spin counts, and ensure each threads will do at least one + // more spin each. Since spins are increased after PROFILER_MARKER calls, in + // the worst case, each thread will have attempted to record at least one + // marker. 
+ selectedThreadSpins = 0; + unselectedThreadSpins = 0; + unregisteredThreadSpins = 0; + while (selectedThreadSpins < 1 && unselectedThreadSpins < 1 && + unregisteredThreadSpins < 1) { + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + + profiler_pause(); + UniquePtr<char[]> profile = profiler_get_profile(); + + profiler_stop(); + stopThreads = true; + unregisteredThread.join(); + unselectedThread.join(); + selectedThread.join(); + + JSONOutputCheck(profile.get(), [&](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + int foundMain = 0; + int foundSelected = 0; + int foundSelectedMarker = 0; + int foundUnselected = 0; + int foundUnselectedMarker = 0; + for (const Json::Value& thread : threads) { + ASSERT_TRUE(thread.isObject()); + GET_JSON(stringTable, thread["stringTable"], Array); + GET_JSON(name, thread["name"], String); + if (name.asString() == "GeckoMain") { + ++foundMain; + // Don't check the main thread further in this test. + + } else if (name.asString() == "Selected test thread") { + ++foundSelected; + + GET_JSON(samples, thread["samples"], Object); + GET_JSON(samplesData, samples["data"], Array); + EXPECT_GT(samplesData.size(), 0u); + + GET_JSON(markers, thread["markers"], Object); + GET_JSON(markersData, markers["data"], Array); + for (const Json::Value& marker : markersData) { + const unsigned int NAME = 0u; + ASSERT_TRUE(marker[NAME].isUInt()); // name id + GET_JSON(name, stringTable[marker[NAME].asUInt()], String); + if (name == "Spinning Selected!") { + ++foundSelectedMarker; + } + } + } else if (name.asString() == "Registered test thread") { + ++foundUnselected; + + GET_JSON(samples, thread["samples"], Object); + GET_JSON(samplesData, samples["data"], Array); + if (threadCPU || threadSampling) { + EXPECT_GT(samplesData.size(), 0u); + } else { + EXPECT_EQ(samplesData.size(), 0u); + } + + GET_JSON(markers, thread["markers"], Object); + GET_JSON(markersData, markers["data"], Array); + for (const Json::Value& marker : markersData) { + const unsigned int NAME = 0u; + ASSERT_TRUE(marker[NAME].isUInt()); // name id + GET_JSON(name, stringTable[marker[NAME].asUInt()], String); + if (name == "Spinning Registered!") { + ++foundUnselectedMarker; + } + } + + } else { + EXPECT_STRNE(name.asString().c_str(), + "Unregistered test thread label"); + } + } + EXPECT_EQ(foundMain, 1); + EXPECT_EQ(foundSelected, 1); + EXPECT_GT(foundSelectedMarker, 0); + EXPECT_EQ(foundUnselected, + (threadCPU || threadSampling || threadMarkers) ? 1 : 0) + << "Unselected thread should only be present if at least one of the " + "allthreads feature is on"; + if (threadMarkers) { + EXPECT_GT(foundUnselectedMarker, 0); + } else { + EXPECT_EQ(foundUnselectedMarker, 0); + } + }); + } +} + +TEST(GeckoProfiler, FailureHandling) +{ + profiler_init_main_thread_id(); + ASSERT_TRUE(profiler_is_main_thread()) + << "This test assumes it runs on the main thread"; + + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + + // User-defined marker type that generates a failure when streaming JSON. 
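+  // Its SetFailure("boom!") below is expected to trip the writer's failure
+  // latch: streaming then returns an error, GetFailure() reports "boom!", and
+  // CopyData() yields nullptr, as checked at the end of this test.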
+
+TEST(GeckoProfiler, FailureHandling)
+{
+  profiler_init_main_thread_id();
+  ASSERT_TRUE(profiler_is_main_thread())
+      << "This test assumes it runs on the main thread";
+
+  uint32_t features = ProfilerFeature::StackWalk;
+  const char* filters[] = {"GeckoMain"};
+  profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features,
+                 filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+  ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted);
+
+  // User-defined marker type that generates a failure when streaming JSON.
+  struct GtestFailingMarker {
+    static constexpr Span<const char> MarkerTypeName() {
+      return MakeStringSpan("markers-gtest-failing");
+    }
+    static void StreamJSONMarkerData(
+        mozilla::baseprofiler::SpliceableJSONWriter& aWriter) {
+      aWriter.SetFailure("boom!");
+    }
+    static mozilla::MarkerSchema MarkerTypeDisplay() {
+      return mozilla::MarkerSchema::SpecialFrontendLocation{};
+    }
+  };
+  EXPECT_TRUE(profiler_add_marker("Gtest failing marker",
+                                  geckoprofiler::category::OTHER, {},
+                                  GtestFailingMarker{}));
+
+  ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted);
+  profiler_pause();
+
+  FailureLatchSource failureLatch;
+  SpliceableChunkedJSONWriter w{failureLatch};
+  EXPECT_FALSE(w.Failed());
+  ASSERT_FALSE(w.GetFailure());
+
+  w.Start();
+  EXPECT_FALSE(w.Failed());
+  ASSERT_FALSE(w.GetFailure());
+
+  // The marker will cause a failure during this function call.
+  EXPECT_FALSE(::profiler_stream_json_for_this_process(w).isOk());
+  EXPECT_TRUE(w.Failed());
+  ASSERT_TRUE(w.GetFailure());
+  EXPECT_EQ(strcmp(w.GetFailure(), "boom!"), 0);
+
+  // Already failed, check that we don't crash or reset the failure.
+  EXPECT_FALSE(::profiler_stream_json_for_this_process(w).isOk());
+  EXPECT_TRUE(w.Failed());
+  ASSERT_TRUE(w.GetFailure());
+  EXPECT_EQ(strcmp(w.GetFailure(), "boom!"), 0);
+
+  w.End();
+
+  profiler_stop();
+
+  EXPECT_TRUE(w.Failed());
+  ASSERT_TRUE(w.GetFailure());
+  EXPECT_EQ(strcmp(w.GetFailure(), "boom!"), 0);
+
+  UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData();
+  ASSERT_EQ(profile.get(), nullptr);
+}
+
+TEST(GeckoProfiler, NoMarkerStacks)
+{
+  uint32_t features = ProfilerFeature::NoMarkerStacks;
+  const char* filters[] = {"GeckoMain"};
+
+  ASSERT_TRUE(!profiler_get_profile());
+
+  // Make sure that profiler_capture_backtrace returns nullptr when the
+  // profiler is not active.
+  ASSERT_TRUE(!profiler_capture_backtrace());
+
+  {
+    // Start the profiler without the NoMarkerStacks feature and make sure we
+    // capture stacks.
+    profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL,
+                   /* features */ 0, filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+    ASSERT_TRUE(profiler_capture_backtrace());
+    profiler_stop();
+  }
+
+  // Start the profiler with the NoMarkerStacks feature and make sure we
+  // don't capture stacks.
+  profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features,
+                 filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+  // Make sure that the active features include the NoMarkerStacks feature.
+  mozilla::Maybe<uint32_t> activeFeatures = profiler_features_if_active();
+  ASSERT_TRUE(activeFeatures.isSome());
+  ASSERT_TRUE(ProfilerFeature::HasNoMarkerStacks(*activeFeatures));
+
+  // Make sure we don't capture stacks.
+  ASSERT_TRUE(!profiler_capture_backtrace());
+
+  // Add a marker with a stack to test.
+  EXPECT_TRUE(profiler_add_marker(
+      "Text with stack", geckoprofiler::category::OTHER,
+      MarkerStack::Capture(), geckoprofiler::markers::TextMarker{}, ""));
+
+  UniquePtr<char[]> profile = profiler_get_profile();
+  JSONOutputCheck(profile.get(), [&](const Json::Value& aRoot) {
+    // Check that the meta.configuration.features array contains
+    // "nomarkerstacks".
+    GET_JSON(meta, aRoot["meta"], Object);
+    {
+      GET_JSON(configuration, meta["configuration"], Object);
+      {
+        GET_JSON(features, configuration["features"], Array);
+        {
+          EXPECT_EQ(features.size(), 1u);
+          EXPECT_JSON_ARRAY_CONTAINS(features, String, "nomarkerstacks");
+        }
+      }
+    }
+
+    // Make sure that the marker we captured doesn't have a stack.
+ GET_JSON(threads, aRoot["threads"], Array); + { + ASSERT_EQ(threads.size(), 1u); + GET_JSON(thread0, threads[0], Object); + { + GET_JSON(markers, thread0["markers"], Object); + { + GET_JSON(data, markers["data"], Array); + { + const unsigned int NAME = 0u; + const unsigned int PAYLOAD = 5u; + bool foundMarker = false; + GET_JSON(stringTable, thread0["stringTable"], Array); + + for (const Json::Value& marker : data) { + // Even though we only added one marker, some markers like + // NotifyObservers are being added as well. Let's iterate over + // them and make sure that we have the one we added explicitly and + // check its stack doesn't exist. + GET_JSON(name, stringTable[marker[NAME].asUInt()], String); + std::string nameString = name.asString(); + + if (nameString == "Text with stack") { + // Make sure that the marker doesn't have a stack. + foundMarker = true; + EXPECT_FALSE(marker[PAYLOAD].isNull()); + EXPECT_TRUE(marker[PAYLOAD]["stack"].isNull()); + } + } + + EXPECT_TRUE(foundMarker); + } + } + } + } + }); + + profiler_stop(); + + ASSERT_TRUE(!profiler_get_profile()); +} + +#endif // MOZ_GECKO_PROFILER diff --git a/tools/profiler/tests/gtest/LulTest.cpp b/tools/profiler/tests/gtest/LulTest.cpp new file mode 100644 index 0000000000..159a366567 --- /dev/null +++ b/tools/profiler/tests/gtest/LulTest.cpp @@ -0,0 +1,51 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "gtest/gtest.h" +#include "mozilla/Atomics.h" +#include "LulMain.h" +#include "GeckoProfiler.h" // for TracingKind +#include "platform-linux-lul.h" // for read_procmaps + +// Set this to 0 to make LUL be completely silent during tests. +// Set it to 1 to get logging output from LUL, presumably for +// the purpose of debugging it. +#define DEBUG_LUL_TEST 0 + +// LUL needs a callback for its logging sink. +static void gtest_logging_sink_for_LulIntegration(const char* str) { + if (DEBUG_LUL_TEST == 0) { + return; + } + // Ignore any trailing \n, since LOG will add one anyway. + size_t n = strlen(str); + if (n > 0 && str[n - 1] == '\n') { + char* tmp = strdup(str); + tmp[n - 1] = 0; + fprintf(stderr, "LUL-in-gtest: %s\n", tmp); + free(tmp); + } else { + fprintf(stderr, "LUL-in-gtest: %s\n", str); + } +} + +TEST(LulIntegration, unwind_consistency) +{ + // Set up LUL and get it to read unwind info for libxul.so, which is + // all we care about here, plus (incidentally) practically every + // other object in the process too. + lul::LUL* lul = new lul::LUL(gtest_logging_sink_for_LulIntegration); + read_procmaps(lul); + + // Run unwind tests and receive information about how many there + // were and how many were successful. + lul->EnableUnwinding(); + int nTests = 0, nTestsPassed = 0; + RunLulUnitTests(&nTests, &nTestsPassed, lul); + EXPECT_TRUE(nTests == 6) << "Unexpected number of tests"; + EXPECT_EQ(nTestsPassed, nTests) << "Not all tests passed"; + + delete lul; +} diff --git a/tools/profiler/tests/gtest/LulTestDwarf.cpp b/tools/profiler/tests/gtest/LulTestDwarf.cpp new file mode 100644 index 0000000000..55373ec093 --- /dev/null +++ b/tools/profiler/tests/gtest/LulTestDwarf.cpp @@ -0,0 +1,2733 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "gtest/gtest.h" +#include "gmock/gmock.h" +#include "LulCommonExt.h" +#include "LulDwarfExt.h" +#include "LulDwarfInt.h" +#include "LulTestInfrastructure.h" + +using lul_test::CFISection; +using lul_test::test_assembler::kBigEndian; +using lul_test::test_assembler::kLittleEndian; +using lul_test::test_assembler::Label; +using testing::_; +using testing::InSequence; +using testing::Return; +using testing::Sequence; +using testing::Test; + +#define PERHAPS_WRITE_DEBUG_FRAME_FILE(name, section) /**/ +#define PERHAPS_WRITE_EH_FRAME_FILE(name, section) /**/ + +// Set this to 0 to make LUL be completely silent during tests. +// Set it to 1 to get logging output from LUL, presumably for +// the purpose of debugging it. +#define DEBUG_LUL_TEST_DWARF 0 + +// LUL needs a callback for its logging sink. +static void gtest_logging_sink_for_LulTestDwarf(const char* str) { + if (DEBUG_LUL_TEST_DWARF == 0) { + return; + } + // Ignore any trailing \n, since LOG will add one anyway. + size_t n = strlen(str); + if (n > 0 && str[n - 1] == '\n') { + char* tmp = strdup(str); + tmp[n - 1] = 0; + fprintf(stderr, "LUL-in-gtest: %s\n", tmp); + free(tmp); + } else { + fprintf(stderr, "LUL-in-gtest: %s\n", str); + } +} + +namespace lul { + +class MockCallFrameInfoHandler : public CallFrameInfo::Handler { + public: + MOCK_METHOD6(Entry, + bool(size_t offset, uint64 address, uint64 length, uint8 version, + const std::string& augmentation, unsigned return_address)); + MOCK_METHOD2(UndefinedRule, bool(uint64 address, int reg)); + MOCK_METHOD2(SameValueRule, bool(uint64 address, int reg)); + MOCK_METHOD4(OffsetRule, + bool(uint64 address, int reg, int base_register, long offset)); + MOCK_METHOD4(ValOffsetRule, + bool(uint64 address, int reg, int base_register, long offset)); + MOCK_METHOD3(RegisterRule, bool(uint64 address, int reg, int base_register)); + MOCK_METHOD3(ExpressionRule, + bool(uint64 address, int reg, const ImageSlice& expression)); + MOCK_METHOD3(ValExpressionRule, + bool(uint64 address, int reg, const ImageSlice& expression)); + MOCK_METHOD0(End, bool()); + MOCK_METHOD2(PersonalityRoutine, bool(uint64 address, bool indirect)); + MOCK_METHOD2(LanguageSpecificDataArea, bool(uint64 address, bool indirect)); + MOCK_METHOD0(SignalHandler, bool()); +}; + +class MockCallFrameErrorReporter : public CallFrameInfo::Reporter { + public: + MockCallFrameErrorReporter() + : Reporter(gtest_logging_sink_for_LulTestDwarf, "mock filename", + "mock section") {} + MOCK_METHOD2(Incomplete, void(uint64, CallFrameInfo::EntryKind)); + MOCK_METHOD1(EarlyEHTerminator, void(uint64)); + MOCK_METHOD2(CIEPointerOutOfRange, void(uint64, uint64)); + MOCK_METHOD2(BadCIEId, void(uint64, uint64)); + MOCK_METHOD2(UnrecognizedVersion, void(uint64, int version)); + MOCK_METHOD2(UnrecognizedAugmentation, void(uint64, const string&)); + MOCK_METHOD2(InvalidPointerEncoding, void(uint64, uint8)); + MOCK_METHOD2(UnusablePointerEncoding, void(uint64, uint8)); + MOCK_METHOD2(RestoreInCIE, void(uint64, uint64)); + MOCK_METHOD3(BadInstruction, void(uint64, CallFrameInfo::EntryKind, uint64)); + MOCK_METHOD3(NoCFARule, void(uint64, CallFrameInfo::EntryKind, uint64)); + MOCK_METHOD3(EmptyStateStack, void(uint64, CallFrameInfo::EntryKind, uint64)); + MOCK_METHOD3(ClearingCFARule, void(uint64, CallFrameInfo::EntryKind, uint64)); +}; + +struct CFIFixture { + enum { kCFARegister = CallFrameInfo::Handler::kCFARegister }; + + 
CFIFixture() {
+    // Default expectations for the data handler.
+    //
+    // - Leave Entry and End without expectations, as it's probably a
+    //   good idea to set those explicitly in each test.
+    //
+    // - Expect the *Rule functions to not be called,
+    //   so that each test can simply list the calls they expect.
+    //
+    // I gather I could use StrictMock for this, but the manual seems
+    // to suggest using that only as a last resort, and this isn't so
+    // bad.
+    EXPECT_CALL(handler, UndefinedRule(_, _)).Times(0);
+    EXPECT_CALL(handler, SameValueRule(_, _)).Times(0);
+    EXPECT_CALL(handler, OffsetRule(_, _, _, _)).Times(0);
+    EXPECT_CALL(handler, ValOffsetRule(_, _, _, _)).Times(0);
+    EXPECT_CALL(handler, RegisterRule(_, _, _)).Times(0);
+    EXPECT_CALL(handler, ExpressionRule(_, _, _)).Times(0);
+    EXPECT_CALL(handler, ValExpressionRule(_, _, _)).Times(0);
+    EXPECT_CALL(handler, PersonalityRoutine(_, _)).Times(0);
+    EXPECT_CALL(handler, LanguageSpecificDataArea(_, _)).Times(0);
+    EXPECT_CALL(handler, SignalHandler()).Times(0);
+
+    // Default expectations for the error/warning reporter.
+    EXPECT_CALL(reporter, Incomplete(_, _)).Times(0);
+    EXPECT_CALL(reporter, EarlyEHTerminator(_)).Times(0);
+    EXPECT_CALL(reporter, CIEPointerOutOfRange(_, _)).Times(0);
+    EXPECT_CALL(reporter, BadCIEId(_, _)).Times(0);
+    EXPECT_CALL(reporter, UnrecognizedVersion(_, _)).Times(0);
+    EXPECT_CALL(reporter, UnrecognizedAugmentation(_, _)).Times(0);
+    EXPECT_CALL(reporter, InvalidPointerEncoding(_, _)).Times(0);
+    EXPECT_CALL(reporter, UnusablePointerEncoding(_, _)).Times(0);
+    EXPECT_CALL(reporter, RestoreInCIE(_, _)).Times(0);
+    EXPECT_CALL(reporter, BadInstruction(_, _, _)).Times(0);
+    EXPECT_CALL(reporter, NoCFARule(_, _, _)).Times(0);
+    EXPECT_CALL(reporter, EmptyStateStack(_, _, _)).Times(0);
+    EXPECT_CALL(reporter, ClearingCFARule(_, _, _)).Times(0);
+  }
+
+  MockCallFrameInfoHandler handler;
+  MockCallFrameErrorReporter reporter;
+};
+
+class LulDwarfCFI : public CFIFixture, public Test {};
+
+TEST_F(LulDwarfCFI, EmptyRegion) {
+  EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(handler, End()).Times(0);
+  static const char data[1] = {42};
+
+  ByteReader reader(ENDIANNESS_BIG);
+  CallFrameInfo parser(data, 0, &reader, &handler, &reporter);
+  EXPECT_TRUE(parser.Start());
+}
+
+TEST_F(LulDwarfCFI, IncompleteLength32) {
+  CFISection section(kBigEndian, 8);
+  section
+      // Not even long enough for an initial length.
+      .D16(0xa0f)
+      // Padding to keep valgrind happy. We subtract these off when we
+      // construct the parser.
+      .D16(0);
+
+  EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(handler, End()).Times(0);
+
+  EXPECT_CALL(reporter, Incomplete(_, CallFrameInfo::kUnknown))
+      .WillOnce(Return());
+
+  string contents;
+  ASSERT_TRUE(section.GetContents(&contents));
+
+  ByteReader reader(ENDIANNESS_BIG);
+  reader.SetAddressSize(8);
+  CallFrameInfo parser(contents.data(), contents.size() - 2, &reader, &handler,
+                       &reporter);
+  EXPECT_FALSE(parser.Start());
+}
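Editorial aside: the incomplete-length tests here and just below truncate a DWARF "initial length". For reference, that field is either a plain 32-bit length, or the escape value 0xffffffff followed by a 64-bit length (the "64-bit DWARF" format exercised below). A minimal decoder sketch; `ReadInitialLength` is a hypothetical helper, not part of the test infrastructure:

    #include <cstdint>
    #include <cstring>

    // Decode a DWARF initial length from a little-endian buffer.
    // Sets *dwarf64 when the 0xffffffff escape introduces a 64-bit
    // length field, as in the IncompleteLength64 test below.
    static uint64_t ReadInitialLength(const uint8_t* p, bool* dwarf64) {
      uint32_t first;
      memcpy(&first, p, 4);
      if (first == 0xffffffff) {  // 64-bit DWARF escape
        uint64_t len;
        memcpy(&len, p + 4, 8);
        *dwarf64 = true;
        return len;
      }
      *dwarf64 = false;
      return first;  // plain 32-bit DWARF length
    }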
+TEST_F(LulDwarfCFI, IncompleteLength64) {
+  CFISection section(kLittleEndian, 4);
+  section
+      // An incomplete 64-bit DWARF initial length.
+      .D32(0xffffffff)
+      .D32(0x71fbaec2)
+      // Padding to keep valgrind happy. We subtract these off when we
+      // construct the parser.
+      .D32(0);
+
+  EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(handler, End()).Times(0);
+
+  EXPECT_CALL(reporter, Incomplete(_, CallFrameInfo::kUnknown))
+      .WillOnce(Return());
+
+  string contents;
+  ASSERT_TRUE(section.GetContents(&contents));
+
+  ByteReader reader(ENDIANNESS_LITTLE);
+  reader.SetAddressSize(4);
+  CallFrameInfo parser(contents.data(), contents.size() - 4, &reader, &handler,
+                       &reporter);
+  EXPECT_FALSE(parser.Start());
+}
+
+TEST_F(LulDwarfCFI, IncompleteId32) {
+  CFISection section(kBigEndian, 8);
+  section
+      .D32(3)  // Initial length, not long enough for id
+      .D8(0xd7)
+      .D8(0xe5)
+      .D8(0xf1)  // incomplete id
+      .CIEHeader(8727, 3983, 8889, 3, "")
+      .FinishEntry();
+
+  EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(handler, End()).Times(0);
+
+  EXPECT_CALL(reporter, Incomplete(_, CallFrameInfo::kUnknown))
+      .WillOnce(Return());
+
+  string contents;
+  ASSERT_TRUE(section.GetContents(&contents));
+
+  ByteReader reader(ENDIANNESS_BIG);
+  reader.SetAddressSize(8);
+  CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler,
+                       &reporter);
+  EXPECT_FALSE(parser.Start());
+}
+
+TEST_F(LulDwarfCFI, BadId32) {
+  CFISection section(kBigEndian, 8);
+  section
+      .D32(0x100)  // Initial length
+      .D32(0xe802fade)  // bogus ID
+      .Append(0x100 - 4, 0x42);  // make the length true
+  section.CIEHeader(1672, 9872, 8529, 3, "").FinishEntry();
+
+  EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(handler, End()).Times(0);
+
+  EXPECT_CALL(reporter, CIEPointerOutOfRange(_, 0xe802fade)).WillOnce(Return());
+
+  string contents;
+  ASSERT_TRUE(section.GetContents(&contents));
+
+  ByteReader reader(ENDIANNESS_BIG);
+  reader.SetAddressSize(8);
+  CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler,
+                       &reporter);
+  EXPECT_FALSE(parser.Start());
+}
+
+// A lone CIE shouldn't cause any handler calls.
+TEST_F(LulDwarfCFI, SingleCIE) {
+  CFISection section(kLittleEndian, 4);
+  section.CIEHeader(0xffe799a8, 0x3398dcdd, 0x6e9683de, 3, "");
+  section.Append(10, lul::DW_CFA_nop);
+  section.FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("SingleCIE", section);
+
+  EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(handler, End()).Times(0);
+
+  string contents;
+  EXPECT_TRUE(section.GetContents(&contents));
+  ByteReader reader(ENDIANNESS_LITTLE);
+  reader.SetAddressSize(4);
+  CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler,
+                       &reporter);
+  EXPECT_TRUE(parser.Start());
+}
+
+// One FDE, one CIE.
+TEST_F(LulDwarfCFI, OneFDE) {
+  CFISection section(kBigEndian, 4);
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(0x4be22f75, 0x2492236e, 0x6b6efb87, 3, "")
+      .FinishEntry()
+      .FDEHeader(cie, 0x7714740d, 0x3d5a10cd)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("OneFDE", section);
+
+  {
+    InSequence s;
+    EXPECT_CALL(handler, Entry(_, 0x7714740d, 0x3d5a10cd, 3, "", 0x6b6efb87))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, End()).WillOnce(Return(true));
+  }
+
+  string contents;
+  EXPECT_TRUE(section.GetContents(&contents));
+  ByteReader reader(ENDIANNESS_BIG);
+  reader.SetAddressSize(4);
+  CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler,
+                       &reporter);
+  EXPECT_TRUE(parser.Start());
+}
+
+// Two FDEs share a CIE.
+TEST_F(LulDwarfCFI, TwoFDEsOneCIE) {
+  CFISection section(kBigEndian, 4);
+  Label cie;
+  section
+      // First FDE. readelf complains about this one because it makes
+      // a forward reference to its CIE.
+ .FDEHeader(cie, 0xa42744df, 0xa3b42121) + .FinishEntry() + // CIE. + .Mark(&cie) + .CIEHeader(0x04f7dc7b, 0x3d00c05f, 0xbd43cb59, 3, "") + .FinishEntry() + // Second FDE. + .FDEHeader(cie, 0x6057d391, 0x700f608d) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("TwoFDEsOneCIE", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0xa42744df, 0xa3b42121, 3, "", 0xbd43cb59)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x6057d391, 0x700f608d, 3, "", 0xbd43cb59)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// Two FDEs, two CIEs. +TEST_F(LulDwarfCFI, TwoFDEsTwoCIEs) { + CFISection section(kLittleEndian, 8); + Label cie1, cie2; + section + // First CIE. + .Mark(&cie1) + .CIEHeader(0x694d5d45, 0x4233221b, 0xbf45e65a, 3, "") + .FinishEntry() + // First FDE which cites second CIE. readelf complains about + // this one because it makes a forward reference to its CIE. + .FDEHeader(cie2, 0x778b27dfe5871f05ULL, 0x324ace3448070926ULL) + .FinishEntry() + // Second FDE, which cites first CIE. + .FDEHeader(cie1, 0xf6054ca18b10bf5fULL, 0x45fdb970d8bca342ULL) + .FinishEntry() + // Second CIE. + .Mark(&cie2) + .CIEHeader(0xfba3fad7, 0x6287e1fd, 0x61d2c581, 2, "") + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("TwoFDEsTwoCIEs", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x778b27dfe5871f05ULL, 0x324ace3448070926ULL, + 2, "", 0x61d2c581)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0xf6054ca18b10bf5fULL, 0x45fdb970d8bca342ULL, + 3, "", 0xbf45e65a)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_LITTLE); + reader.SetAddressSize(8); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// An FDE whose CIE specifies a version we don't recognize. +TEST_F(LulDwarfCFI, BadVersion) { + CFISection section(kBigEndian, 4); + Label cie1, cie2; + section.Mark(&cie1) + .CIEHeader(0xca878cf0, 0x7698ec04, 0x7b616f54, 0x52, "") + .FinishEntry() + // We should skip this entry, as its CIE specifies a version we + // don't recognize. + .FDEHeader(cie1, 0x08852292, 0x2204004a) + .FinishEntry() + // Despite the above, we should visit this entry. + .Mark(&cie2) + .CIEHeader(0x7c3ae7c9, 0xb9b9a512, 0x96cb3264, 3, "") + .FinishEntry() + .FDEHeader(cie2, 0x2094735a, 0x6e875501) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("BadVersion", section); + + EXPECT_CALL(reporter, UnrecognizedVersion(_, 0x52)).WillOnce(Return()); + + { + InSequence s; + // We should see no mention of the first FDE, but we should get + // a call to Entry for the second. 
+ EXPECT_CALL(handler, Entry(_, 0x2094735a, 0x6e875501, 3, "", 0x96cb3264)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +// An FDE whose CIE specifies an augmentation we don't recognize. +TEST_F(LulDwarfCFI, BadAugmentation) { + CFISection section(kBigEndian, 4); + Label cie1, cie2; + section.Mark(&cie1) + .CIEHeader(0x4be22f75, 0x2492236e, 0x6b6efb87, 3, "spaniels!") + .FinishEntry() + // We should skip this entry, as its CIE specifies an + // augmentation we don't recognize. + .FDEHeader(cie1, 0x7714740d, 0x3d5a10cd) + .FinishEntry() + // Despite the above, we should visit this entry. + .Mark(&cie2) + .CIEHeader(0xf8bc4399, 0x8cf09931, 0xf2f519b2, 3, "") + .FinishEntry() + .FDEHeader(cie2, 0x7bf0fda0, 0xcbcd28d8) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("BadAugmentation", section); + + EXPECT_CALL(reporter, UnrecognizedAugmentation(_, "spaniels!")) + .WillOnce(Return()); + + { + InSequence s; + // We should see no mention of the first FDE, but we should get + // a call to Entry for the second. + EXPECT_CALL(handler, Entry(_, 0x7bf0fda0, 0xcbcd28d8, 3, "", 0xf2f519b2)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +// The return address column field is a byte in CFI version 1 +// (DWARF2), but a ULEB128 value in version 3 (DWARF3). +TEST_F(LulDwarfCFI, CIEVersion1ReturnColumn) { + CFISection section(kBigEndian, 4); + Label cie; + section + // CIE, using the version 1 format: return column is a ubyte. + .Mark(&cie) + // Use a value for the return column that is parsed differently + // as a ubyte and as a ULEB128. + .CIEHeader(0xbcdea24f, 0x5be28286, 0x9f, 1, "") + .FinishEntry() + // FDE, citing that CIE. + .FDEHeader(cie, 0xb8d347b5, 0x825e55dc) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("CIEVersion1ReturnColumn", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0xb8d347b5, 0x825e55dc, 1, "", 0x9f)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// The return address column field is a byte in CFI version 1 +// (DWARF2), but a ULEB128 value in version 3 (DWARF3). +TEST_F(LulDwarfCFI, CIEVersion3ReturnColumn) { + CFISection section(kBigEndian, 4); + Label cie; + section + // CIE, using the version 3 format: return column is a ULEB128. + .Mark(&cie) + // Use a value for the return column that is parsed differently + // as a ubyte and as a ULEB128. + .CIEHeader(0x0ab4758d, 0xc010fdf7, 0x89, 3, "") + .FinishEntry() + // FDE, citing that CIE. 
+ .FDEHeader(cie, 0x86763f2b, 0x2a66dc23) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("CIEVersion3ReturnColumn", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x86763f2b, 0x2a66dc23, 3, "", 0x89)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +struct CFIInsnFixture : public CFIFixture { + CFIInsnFixture() : CFIFixture() { + data_factor = 0xb6f; + return_register = 0x9be1ed9f; + version = 3; + cfa_base_register = 0x383a3aa; + cfa_offset = 0xf748; + } + + // Prepare SECTION to receive FDE instructions. + // + // - Append a stock CIE header that establishes the fixture's + // code_factor, data_factor, return_register, version, and + // augmentation values. + // - Have the CIE set up a CFA rule using cfa_base_register and + // cfa_offset. + // - Append a stock FDE header, referring to the above CIE, for the + // fde_size bytes at fde_start. Choose fde_start and fde_size + // appropriately for the section's address size. + // - Set appropriate expectations on handler in sequence s for the + // frame description entry and the CIE's CFA rule. + // + // On return, SECTION is ready to have FDE instructions appended to + // it, and its FinishEntry member called. + void StockCIEAndFDE(CFISection* section) { + // Choose appropriate constants for our address size. + if (section->AddressSize() == 4) { + fde_start = 0xc628ecfbU; + fde_size = 0x5dee04a2; + code_factor = 0x60b; + } else { + assert(section->AddressSize() == 8); + fde_start = 0x0005c57ce7806bd3ULL; + fde_size = 0x2699521b5e333100ULL; + code_factor = 0x01008e32855274a8ULL; + } + + // Create the CIE. + (*section) + .Mark(&cie_label) + .CIEHeader(code_factor, data_factor, return_register, version, "") + .D8(lul::DW_CFA_def_cfa) + .ULEB128(cfa_base_register) + .ULEB128(cfa_offset) + .FinishEntry(); + + // Create the FDE. + section->FDEHeader(cie_label, fde_start, fde_size); + + // Expect an Entry call for the FDE and a ValOffsetRule call for the + // CIE's CFA rule. + EXPECT_CALL(handler, + Entry(_, fde_start, fde_size, version, "", return_register)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, + cfa_base_register, cfa_offset)) + .InSequence(s) + .WillOnce(Return(true)); + } + + // Run the contents of SECTION through a CallFrameInfo parser, + // expecting parser.Start to return SUCCEEDS. Caller may optionally + // supply, via READER, its own ByteReader. If that's absent, a + // local one is used. + void ParseSection(CFISection* section, bool succeeds = true, + ByteReader* reader = nullptr) { + string contents; + EXPECT_TRUE(section->GetContents(&contents)); + lul::Endianness endianness; + if (section->endianness() == kBigEndian) + endianness = ENDIANNESS_BIG; + else { + assert(section->endianness() == kLittleEndian); + endianness = ENDIANNESS_LITTLE; + } + ByteReader local_reader(endianness); + ByteReader* reader_to_use = reader ? 
reader : &local_reader;
+    reader_to_use->SetAddressSize(section->AddressSize());
+    CallFrameInfo parser(contents.data(), contents.size(), reader_to_use,
+                         &handler, &reporter);
+    if (succeeds)
+      EXPECT_TRUE(parser.Start());
+    else
+      EXPECT_FALSE(parser.Start());
+  }
+
+  Label cie_label;
+  Sequence s;
+  uint64 code_factor;
+  int data_factor;
+  unsigned return_register;
+  unsigned version;
+  unsigned cfa_base_register;
+  int cfa_offset;
+  uint64 fde_start, fde_size;
+};
+
+class LulDwarfCFIInsn : public CFIInsnFixture, public Test {};
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_set_loc) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_set_loc)
+      .D32(0xb1ee3e7a)
+      // Use DW_CFA_def_cfa to force a handler call that we can use to
+      // check the effect of the DW_CFA_set_loc.
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x4defb431)
+      .ULEB128(0x6d17b0ee)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_set_loc", section);
+
+  EXPECT_CALL(handler,
+              ValOffsetRule(0xb1ee3e7a, kCFARegister, 0x4defb431, 0x6d17b0ee))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc) {
+  CFISection section(kBigEndian, 8);
+  StockCIEAndFDE(&section);
+  section
+      .D8(lul::DW_CFA_advance_loc | 0x2a)
+      // Use DW_CFA_def_cfa to force a handler call that we can use to
+      // check the effect of the DW_CFA_advance_loc.
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x5bbb3715)
+      .ULEB128(0x0186c7bf)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc", section);
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start + 0x2a * code_factor,
+                                     kCFARegister, 0x5bbb3715, 0x0186c7bf))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc1) {
+  CFISection section(kLittleEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_advance_loc1)
+      .D8(0xd8)
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x69d5696a)
+      .ULEB128(0x1eb7fc93)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc1", section);
+
+  EXPECT_CALL(handler, ValOffsetRule((fde_start + 0xd8 * code_factor),
+                                     kCFARegister, 0x69d5696a, 0x1eb7fc93))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc2) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_advance_loc2)
+      .D16(0x3adb)
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x3a368bed)
+      .ULEB128(0x3194ee37)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc2", section);
+
+  EXPECT_CALL(handler, ValOffsetRule((fde_start + 0x3adb * code_factor),
+                                     kCFARegister, 0x3a368bed, 0x3194ee37))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc4) {
+  CFISection section(kBigEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_advance_loc4)
+      .D32(0x15813c88)
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x135270c5)
+      .ULEB128(0x24bad7cb)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc4", section);
+
+  EXPECT_CALL(handler, ValOffsetRule((fde_start + 0x15813c88ULL * code_factor),
+                                     kCFARegister, 0x135270c5, 0x24bad7cb))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_MIPS_advance_loc8) {
+  code_factor = 0x2d;
+  CFISection section(kBigEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_MIPS_advance_loc8)
+      .D64(0x3c4f3945b92c14ULL)
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0xe17ed602)
+      .ULEB128(0x3d162e7f)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc8", section);
+
+  EXPECT_CALL(handler,
+              ValOffsetRule((fde_start + 0x3c4f3945b92c14ULL * code_factor),
+                            kCFARegister, 0xe17ed602, 0x3d162e7f))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x4e363a85)
+      .ULEB128(0x815f9aa7)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_def_cfa", section);
+
+  EXPECT_CALL(handler,
+              ValOffsetRule(fde_start, kCFARegister, 0x4e363a85, 0x815f9aa7))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_sf) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa_sf)
+      .ULEB128(0x8ccb32b7)
+      .LEB128(0x9ea)
+      .D8(lul::DW_CFA_def_cfa_sf)
+      .ULEB128(0x9b40f5da)
+      .LEB128(-0x40a2)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, 0x8ccb32b7,
+                                     0x9ea * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, 0x9b40f5da,
+                                     -0x40a2 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_register) {
+  CFISection section(kLittleEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa_register).ULEB128(0x3e7e9363).FinishEntry();
+
+  EXPECT_CALL(handler,
+              ValOffsetRule(fde_start, kCFARegister, 0x3e7e9363, cfa_offset))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
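Editorial aside: the .ULEB128()/.LEB128() calls used throughout these tests emit DWARF's variable-length integers. For reference (this is not part of the test infrastructure), a minimal unsigned-LEB128 encoder looks like this:

    #include <cstdint>
    #include <vector>

    // Encode an unsigned value as ULEB128: seven payload bits per byte,
    // high bit set on every byte except the last.
    static std::vector<uint8_t> EncodeULEB128(uint64_t value) {
      std::vector<uint8_t> out;
      do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value) byte |= 0x80;  // more bytes follow
        out.push_back(byte);
      } while (value);
      return out;
    }

For example, 0x9f encodes as the two bytes {0x9f, 0x01}, while as a plain ubyte it is the single byte 0x9f — which is exactly why the CIEVersion1ReturnColumn and CIEVersion3ReturnColumn tests above choose return-column values that parse differently under the two encodings.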
+
+// DW_CFA_def_cfa_register should have no effect when applied to a
+// non-base/offset rule.
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_registerBadRule) {
+  ByteReader reader(ENDIANNESS_BIG);
+  CFISection section(kBigEndian, 4);
+  ImageSlice expr("needle in a haystack");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa_expression)
+      .Block(expr)
+      .D8(lul::DW_CFA_def_cfa_register)
+      .ULEB128(0xf1b49e49)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, kCFARegister, expr))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_offset) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa_offset).ULEB128(0x1e8e3b9b).FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister,
+                                     cfa_base_register, 0x1e8e3b9b))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_offset_sf) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa_offset_sf)
+      .LEB128(0x970)
+      .D8(lul::DW_CFA_def_cfa_offset_sf)
+      .LEB128(-0x2cd)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister,
+                                     cfa_base_register, 0x970 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister,
+                                     cfa_base_register, -0x2cd * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+// DW_CFA_def_cfa_offset should have no effect when applied to a
+// non-base/offset rule.
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_offsetBadRule) {
+  ByteReader reader(ENDIANNESS_BIG);
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  ImageSlice expr("six ways to Sunday");
+  section.D8(lul::DW_CFA_def_cfa_expression)
+      .Block(expr)
+      .D8(lul::DW_CFA_def_cfa_offset)
+      .ULEB128(0x1e8e3b9b)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, kCFARegister, expr))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_expression) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 8);
+  ImageSlice expr("eating crow");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_def_cfa_expression).Block(expr).FinishEntry();
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, kCFARegister, expr))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_undefined) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_undefined).ULEB128(0x300ce45d).FinishEntry();
+
+  EXPECT_CALL(handler, UndefinedRule(fde_start, 0x300ce45d))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_same_value) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_same_value).ULEB128(0x3865a760).FinishEntry();
+
+  EXPECT_CALL(handler, SameValueRule(fde_start, 0x3865a760))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
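Editorial aside: a reader's guide to the handler calls being asserted throughout. OffsetRule(addr, reg, base, off) means the register's saved value sits *in memory at* base + off, while ValOffsetRule means the register's value *is* base + off, with no load; the CFA itself is always a ValOffset-style rule. A sketch just to make the distinction concrete — these helpers are illustrative, not LUL APIs:

    #include <cstdint>

    // Stand-in for however an unwinder reads the target stack.
    static uint64_t ReadMemory(uint64_t) { return 0; }  // stub

    // OffsetRule: the register was SAVED AT cfa + offset.
    uint64_t RecoverFromOffsetRule(uint64_t cfa, long offset) {
      return ReadMemory(cfa + offset);
    }

    // ValOffsetRule: the register's value IS cfa + offset (used for the
    // CFA itself, e.g. "CFA = reg + 0xf748", and for DW_CFA_val_offset).
    uint64_t RecoverFromValOffsetRule(uint64_t cfa, long offset) {
      return cfa + offset;
    }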
+TEST_F(LulDwarfCFIInsn, DW_CFA_offset) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_offset | 0x2c).ULEB128(0x9f6).FinishEntry();
+
+  EXPECT_CALL(handler,
+              OffsetRule(fde_start, 0x2c, kCFARegister, 0x9f6 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_offset_extended) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_offset_extended)
+      .ULEB128(0x402b)
+      .ULEB128(0xb48)
+      .FinishEntry();
+
+  EXPECT_CALL(handler,
+              OffsetRule(fde_start, 0x402b, kCFARegister, 0xb48 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_offset_extended_sf) {
+  CFISection section(kBigEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_offset_extended_sf)
+      .ULEB128(0x997c23ee)
+      .LEB128(0x2d00)
+      .D8(lul::DW_CFA_offset_extended_sf)
+      .ULEB128(0x9519eb82)
+      .LEB128(-0xa77)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, OffsetRule(fde_start, 0x997c23ee, kCFARegister,
+                                  0x2d00 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(fde_start, 0x9519eb82, kCFARegister,
+                                  -0xa77 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_val_offset) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_offset)
+      .ULEB128(0x623562fe)
+      .ULEB128(0x673)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x623562fe, kCFARegister,
+                                     0x673 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_val_offset_sf) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_offset_sf)
+      .ULEB128(0x6f4f)
+      .LEB128(0xaab)
+      .D8(lul::DW_CFA_val_offset_sf)
+      .ULEB128(0x2483)
+      .LEB128(-0x8a2)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x6f4f, kCFARegister,
+                                     0xaab * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x2483, kCFARegister,
+                                     -0x8a2 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_register) {
+  CFISection section(kLittleEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_register)
+      .ULEB128(0x278d18f9)
+      .ULEB128(0x1a684414)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, RegisterRule(fde_start, 0x278d18f9, 0x1a684414))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_expression) {
+  ByteReader reader(ENDIANNESS_BIG);
+  CFISection section(kBigEndian, 8);
+  StockCIEAndFDE(&section);
+  ImageSlice expr("plus ça change, plus c'est la même chose");
+  section.D8(lul::DW_CFA_expression)
+      .ULEB128(0xa1619fb2)
+      .Block(expr)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ExpressionRule(fde_start, 0xa1619fb2, expr))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
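Editorial aside: every expectation above multiplies the raw operand by code_factor or data_factor. That is DWARF's alignment-factor compression: CFI stores operands pre-divided by the CIE's code/data alignment factors, and consumers scale them back up. Illustrative arithmetic only — the factors come from the CIE header:

    #include <cstdint>

    // DW_CFA_advance_loc packs a 6-bit delta into the opcode byte; the
    // real advance is delta * code_factor, as the advance_loc tests assert.
    uint64_t AdvanceLocTarget(uint64_t pc, uint8_t op, uint64_t code_factor) {
      uint8_t delta = op & 0x3f;  // low 6 bits of DW_CFA_advance_loc
      return pc + delta * code_factor;
    }

    // Offset-style rules scale by the data alignment factor,
    // e.g. 0x9f6 * data_factor in the DW_CFA_offset test above.
    long OffsetRuleOffset(long factored_offset, long data_factor) {
      return factored_offset * data_factor;
    }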
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_val_expression) {
+  ByteReader reader(ENDIANNESS_BIG);
+  CFISection section(kBigEndian, 4);
+  ImageSlice expr("he who has the gold makes the rules");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_expression)
+      .ULEB128(0xc5e4a9e3)
+      .Block(expr)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, 0xc5e4a9e3, expr))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_restore) {
+  CFISection section(kLittleEndian, 8);
+  code_factor = 0x01bd188a9b1fa083ULL;
+  data_factor = -0x1ac8;
+  return_register = 0x8c35b049;
+  version = 2;
+  fde_start = 0x2d70fe998298bbb1ULL;
+  fde_size = 0x46ccc2e63cf0b108ULL;
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(code_factor, data_factor, return_register, version, "")
+      // Provide a CFA rule, because register rules require them.
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x6ca1d50e)
+      .ULEB128(0x372e38e8)
+      // Provide an offset(N) rule for register 0x3c.
+      .D8(lul::DW_CFA_offset | 0x3c)
+      .ULEB128(0xb348)
+      .FinishEntry()
+      // In the FDE...
+      .FDEHeader(cie, fde_start, fde_size)
+      // At a second address, provide a new offset(N) rule for register 0x3c.
+      .D8(lul::DW_CFA_advance_loc | 0x13)
+      .D8(lul::DW_CFA_offset | 0x3c)
+      .ULEB128(0x9a50)
+      // At a third address, restore the original rule for register 0x3c.
+      .D8(lul::DW_CFA_advance_loc | 0x01)
+      .D8(lul::DW_CFA_restore | 0x3c)
+      .FinishEntry();
+
+  {
+    InSequence s;
+    EXPECT_CALL(handler,
+                Entry(_, fde_start, fde_size, version, "", return_register))
+        .WillOnce(Return(true));
+    // CIE's CFA rule.
+    EXPECT_CALL(handler,
+                ValOffsetRule(fde_start, kCFARegister, 0x6ca1d50e, 0x372e38e8))
+        .WillOnce(Return(true));
+    // CIE's rule for register 0x3c.
+    EXPECT_CALL(handler, OffsetRule(fde_start, 0x3c, kCFARegister,
+                                    0xb348 * data_factor))
+        .WillOnce(Return(true));
+    // FDE's rule for register 0x3c.
+    EXPECT_CALL(handler, OffsetRule(fde_start + 0x13 * code_factor, 0x3c,
+                                    kCFARegister, 0x9a50 * data_factor))
+        .WillOnce(Return(true));
+    // Restore CIE's rule for register 0x3c.
+    EXPECT_CALL(handler, OffsetRule(fde_start + (0x13 + 0x01) * code_factor,
+                                    0x3c, kCFARegister, 0xb348 * data_factor))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, End()).WillOnce(Return(true));
+  }
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_restoreNoRule) {
+  CFISection section(kBigEndian, 4);
+  code_factor = 0x005f78143c1c3b82ULL;
+  data_factor = 0x25d0;
+  return_register = 0xe8;
+  version = 1;
+  fde_start = 0x4062e30f;
+  fde_size = 0x5302a389;
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(code_factor, data_factor, return_register, version, "")
+      // Provide a CFA rule, because register rules require them.
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x470aa334)
+      .ULEB128(0x099ef127)
+      .FinishEntry()
+      // In the FDE...
+      .FDEHeader(cie, fde_start, fde_size)
+      // At a second address, provide an offset(N) rule for register 0x2c.
+      .D8(lul::DW_CFA_advance_loc | 0x7)
+      .D8(lul::DW_CFA_offset | 0x2c)
+      .ULEB128(0x1f47)
+      // At a third address, restore the (missing) CIE rule for register 0x2c.
+      .D8(lul::DW_CFA_advance_loc | 0xb)
+      .D8(lul::DW_CFA_restore | 0x2c)
+      .FinishEntry();
+
+  {
+    InSequence s;
+    EXPECT_CALL(handler,
+                Entry(_, fde_start, fde_size, version, "", return_register))
+        .WillOnce(Return(true));
+    // CIE's CFA rule.
+    EXPECT_CALL(handler,
+                ValOffsetRule(fde_start, kCFARegister, 0x470aa334, 0x099ef127))
+        .WillOnce(Return(true));
+    // FDE's rule for register 0x2c.
+    EXPECT_CALL(handler, OffsetRule(fde_start + 0x7 * code_factor, 0x2c,
+                                    kCFARegister, 0x1f47 * data_factor))
+        .WillOnce(Return(true));
+    // Restore CIE's (missing) rule for register 0x2c.
+    EXPECT_CALL(handler,
+                SameValueRule(fde_start + (0x7 + 0xb) * code_factor, 0x2c))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, End()).WillOnce(Return(true));
+  }
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_restore_extended) {
+  CFISection section(kBigEndian, 4);
+  code_factor = 0x126e;
+  data_factor = -0xd8b;
+  return_register = 0x77711787;
+  version = 3;
+  fde_start = 0x01f55a45;
+  fde_size = 0x452adb80;
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(code_factor, data_factor, return_register, version, "",
+                 true /* dwarf64 */)
+      // Provide a CFA rule, because register rules require them.
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x56fa0edd)
+      .ULEB128(0x097f78a5)
+      // Provide an offset(N) rule for register 0x0f9b8a1c.
+      .D8(lul::DW_CFA_offset_extended)
+      .ULEB128(0x0f9b8a1c)
+      .ULEB128(0xc979)
+      .FinishEntry()
+      // In the FDE...
+      .FDEHeader(cie, fde_start, fde_size)
+      // At a second address, provide a new offset(N) rule for reg 0x0f9b8a1c.
+      .D8(lul::DW_CFA_advance_loc | 0x3)
+      .D8(lul::DW_CFA_offset_extended)
+      .ULEB128(0x0f9b8a1c)
+      .ULEB128(0x3b7b)
+      // At a third address, restore the original rule for register 0x0f9b8a1c.
+      .D8(lul::DW_CFA_advance_loc | 0x04)
+      .D8(lul::DW_CFA_restore_extended)
+      .ULEB128(0x0f9b8a1c)
+      .FinishEntry();
+
+  {
+    InSequence s;
+    EXPECT_CALL(handler,
+                Entry(_, fde_start, fde_size, version, "", return_register))
+        .WillOnce(Return(true));
+    // CIE's CFA rule.
+    EXPECT_CALL(handler,
+                ValOffsetRule(fde_start, kCFARegister, 0x56fa0edd, 0x097f78a5))
+        .WillOnce(Return(true));
+    // CIE's rule for register 0x0f9b8a1c.
+    EXPECT_CALL(handler, OffsetRule(fde_start, 0x0f9b8a1c, kCFARegister,
+                                    0xc979 * data_factor))
+        .WillOnce(Return(true));
+    // FDE's rule for register 0x0f9b8a1c.
+    EXPECT_CALL(handler, OffsetRule(fde_start + 0x3 * code_factor, 0x0f9b8a1c,
+                                    kCFARegister, 0x3b7b * data_factor))
+        .WillOnce(Return(true));
+    // Restore CIE's rule for register 0x0f9b8a1c.
+    EXPECT_CALL(handler,
+                OffsetRule(fde_start + (0x3 + 0x4) * code_factor, 0x0f9b8a1c,
+                           kCFARegister, 0xc979 * data_factor))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, End()).WillOnce(Return(true));
+  }
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_remember_and_restore_state) {
+  CFISection section(kLittleEndian, 8);
+  StockCIEAndFDE(&section);
+
+  // We create a state, save it, modify it, and then restore. We
+  // refer to the state that is overridden by the restore as the
+  // "outgoing" state, and the restored state the "incoming" state.
+  //
+  // Register    outgoing       incoming       expect
+  // 1           offset(N)      no rule        new "same value" rule
+  // 2           register(R)    offset(N)      report changed rule
+  // 3           offset(N)      offset(M)      report changed offset
+  // 4           offset(N)      offset(N)      no report
+  // 5           offset(N)      no rule        new "same value" rule
+  section
+      // Create the "incoming" state, which we will save and later restore.
+      .D8(lul::DW_CFA_offset | 2)
+      .ULEB128(0x9806)
+      .D8(lul::DW_CFA_offset | 3)
+      .ULEB128(0x995d)
+      .D8(lul::DW_CFA_offset | 4)
+      .ULEB128(0x7055)
+      .D8(lul::DW_CFA_remember_state)
+      // Advance to a new instruction; an implementation could legitimately
+      // ignore all but the final rule for a given register at a given address.
+      .D8(lul::DW_CFA_advance_loc | 1)
+      // Create the "outgoing" state, which we will discard.
+      .D8(lul::DW_CFA_offset | 1)
+      .ULEB128(0xea1a)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(2)
+      .ULEB128(0x1d2a3767)
+      .D8(lul::DW_CFA_offset | 3)
+      .ULEB128(0xdd29)
+      .D8(lul::DW_CFA_offset | 5)
+      .ULEB128(0xf1ce)
+      // At a third address, restore the incoming state.
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  uint64 addr = fde_start;
+
+  // Expect the incoming rules to be reported.
+  EXPECT_CALL(handler, OffsetRule(addr, 2, kCFARegister, 0x9806 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(addr, 3, kCFARegister, 0x995d * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(addr, 4, kCFARegister, 0x7055 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+
+  addr += code_factor;
+
+  // After the save, we establish the outgoing rule set.
+  EXPECT_CALL(handler, OffsetRule(addr, 1, kCFARegister, 0xea1a * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, RegisterRule(addr, 2, 0x1d2a3767))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(addr, 3, kCFARegister, 0xdd29 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(addr, 5, kCFARegister, 0xf1ce * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+
+  addr += code_factor;
+
+  // Finally, after the restore, expect to see the differences from
+  // the outgoing to the incoming rules reported.
+  EXPECT_CALL(handler, SameValueRule(addr, 1))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(addr, 2, kCFARegister, 0x9806 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(addr, 3, kCFARegister, 0x995d * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SameValueRule(addr, 5))
+      .InSequence(s)
+      .WillOnce(Return(true));
+
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
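Editorial aside: DW_CFA_remember_state/DW_CFA_restore_state behave like push and pop on a stack of register-rule snapshots, which is what the test above exercises. A minimal sketch of the consumer-side bookkeeping — the types here are illustrative, not LUL's:

    #include <map>
    #include <stack>
    #include <utility>

    // Illustrative only: one rule per DWARF register number; Rule would
    // capture the kind (undefined, same-value, offset, register, ...).
    struct Rule { /* kind + operands */ };
    using RuleMap = std::map<int, Rule>;

    struct CFIState {
      RuleMap rules;
      std::stack<RuleMap> saved;

      void RememberState() { saved.push(rules); }  // DW_CFA_remember_state
      bool RestoreState() {                        // DW_CFA_restore_state
        if (saved.empty()) return false;  // parser reports EmptyStateStack
        rules = std::move(saved.top());
        saved.pop();
        return true;
      }
    };

On restore, the parser reports only the rules that differ from the current (outgoing) state, which is exactly the per-register expectation table spelled out in the comment above.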
+
+// Check that restoring a rule set reports changes to the CFA rule.
+TEST_F(LulDwarfCFIInsn, DW_CFA_remember_and_restore_stateCFA) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+
+  section.D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_def_cfa_offset)
+      .ULEB128(0x90481102)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start + code_factor, kCFARegister,
+                                     cfa_base_register, 0x90481102))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start + code_factor * 2, kCFARegister,
+                                     cfa_base_register, cfa_offset))
+      .InSequence(s)
+      .WillOnce(Return(true));
+
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_nop) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_nop)
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x3fb8d4f1)
+      .ULEB128(0x078dc67b)
+      .D8(lul::DW_CFA_nop)
+      .FinishEntry();
+
+  EXPECT_CALL(handler,
+              ValOffsetRule(fde_start, kCFARegister, 0x3fb8d4f1, 0x078dc67b))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_GNU_window_save) {
+  CFISection section(kBigEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_GNU_window_save).FinishEntry();
+
+  // Don't include all the rules in any particular sequence.
+
+  // The caller's %o0-%o7 have become the callee's %i0-%i7. This is
+  // the GCC register numbering.
+  for (int i = 8; i < 16; i++)
+    EXPECT_CALL(handler, RegisterRule(fde_start, i, i + 16))
+        .WillOnce(Return(true));
+  // The caller's %l0-%l7 and %i0-%i7 have been saved at the top of
+  // its frame.
+  for (int i = 16; i < 32; i++)
+    EXPECT_CALL(handler, OffsetRule(fde_start, i, kCFARegister, (i - 16) * 4))
+        .WillOnce(Return(true));
+
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_GNU_args_size) {
+  CFISection section(kLittleEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_GNU_args_size)
+      .ULEB128(0xeddfa520)
+      // Verify that we see this, meaning we parsed the above properly.
+      .D8(lul::DW_CFA_offset | 0x23)
+      .ULEB128(0x269)
+      .FinishEntry();
+
+  EXPECT_CALL(handler,
+              OffsetRule(fde_start, 0x23, kCFARegister, 0x269 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIInsn, DW_CFA_GNU_negative_offset_extended) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_GNU_negative_offset_extended)
+      .ULEB128(0x430cc87a)
+      .ULEB128(0x613)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, OffsetRule(fde_start, 0x430cc87a, kCFARegister,
+                                  -0x613 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+// Three FDEs: skip the second
+TEST_F(LulDwarfCFIInsn, SkipFDE) {
+  CFISection section(kBigEndian, 4);
+  Label cie;
+  section
+      // CIE, used by all FDEs.
+      .Mark(&cie)
+      .CIEHeader(0x010269f2, 0x9177, 0xedca5849, 2, "")
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(0x42ed390b)
+      .ULEB128(0x98f43aad)
+      .FinishEntry()
+      // First FDE.
+      .FDEHeader(cie, 0xa870ebdd, 0x60f6aa4)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(0x3a860351)
+      .ULEB128(0x6c9a6bcf)
+      .FinishEntry()
+      // Second FDE.
+      .FDEHeader(cie, 0xc534f7c0, 0xf6552e9, true /* dwarf64 */)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(0x1b62c234)
+      .ULEB128(0x26586b18)
+      .FinishEntry()
+      // Third FDE.
+      .FDEHeader(cie, 0xf681cfc8, 0x7e4594e)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(0x26c53934)
+      .ULEB128(0x18eeb8a4)
+      .FinishEntry();
+
+  {
+    InSequence s;
+
+    // Process the first FDE.
+    EXPECT_CALL(handler, Entry(_, 0xa870ebdd, 0x60f6aa4, 2, "", 0xedca5849))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, ValOffsetRule(0xa870ebdd, kCFARegister, 0x42ed390b,
+                                       0x98f43aad))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, RegisterRule(0xa870ebdd, 0x3a860351, 0x6c9a6bcf))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+    // Skip the second FDE.
+    EXPECT_CALL(handler, Entry(_, 0xc534f7c0, 0xf6552e9, 2, "", 0xedca5849))
+        .WillOnce(Return(false));
+
+    // Process the third FDE.
+    EXPECT_CALL(handler, Entry(_, 0xf681cfc8, 0x7e4594e, 2, "", 0xedca5849))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, ValOffsetRule(0xf681cfc8, kCFARegister, 0x42ed390b,
+                                       0x98f43aad))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, RegisterRule(0xf681cfc8, 0x26c53934, 0x18eeb8a4))
+        .WillOnce(Return(true));
+    EXPECT_CALL(handler, End()).WillOnce(Return(true));
+  }
+
+  ParseSection(&section);
+}
+
+// Quit processing in the middle of an entry's instructions.
+TEST_F(LulDwarfCFIInsn, QuitMidentry) {
+  CFISection section(kLittleEndian, 8);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_register)
+      .ULEB128(0xe0cf850d)
+      .ULEB128(0x15aab431)
+      .D8(lul::DW_CFA_expression)
+      .ULEB128(0x46750aa5)
+      .Block("meat")
+      .FinishEntry();
+
+  EXPECT_CALL(handler, RegisterRule(fde_start, 0xe0cf850d, 0x15aab431))
+      .InSequence(s)
+      .WillOnce(Return(false));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseSection(&section, false);
+}
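Editorial aside: SkipFDE and QuitMidentry exercise the parser's flow-control convention, which these tests document by example: every handler callback returns bool; returning false from Entry makes the parser skip that FDE's instructions and continue, while returning false mid-entry aborts parsing (Start() then returns false, though End() is still delivered). Restated as standalone logic — FilterFDE is a hypothetical helper, not a LUL API:

    #include <cstdint>

    enum class OnEntry { Visit, Skip };

    // Decide whether an FDE covering [address, address + length) is worth
    // visiting when a consumer only cares about [lo, hi). Returning false
    // from Handler::Entry corresponds to OnEntry::Skip: the parser drops
    // that FDE's instructions and proceeds to the next entry, as the
    // SkipFDE test above asserts.
    OnEntry FilterFDE(uint64_t address, uint64_t length,
                      uint64_t lo, uint64_t hi) {
      bool overlaps = address < hi && address + length > lo;
      return overlaps ? OnEntry::Visit : OnEntry::Skip;
    }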
+
+class LulDwarfCFIRestore : public CFIInsnFixture, public Test {};
+
+TEST_F(LulDwarfCFIRestore, RestoreUndefinedRuleUnchanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_undefined)
+      .ULEB128(0x0bac878e)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, UndefinedRule(fde_start, 0x0bac878e))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreUndefinedRuleChanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_undefined)
+      .ULEB128(0x7dedff5f)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_same_value)
+      .ULEB128(0x7dedff5f)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, UndefinedRule(fde_start, 0x7dedff5f))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SameValueRule(fde_start + code_factor, 0x7dedff5f))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + 2 * code_factor, 0x7dedff5f))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreSameValueRuleUnchanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_same_value)
+      .ULEB128(0xadbc9b3a)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, SameValueRule(fde_start, 0xadbc9b3a))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreSameValueRuleChanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_same_value)
+      .ULEB128(0x3d90dcb5)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0x3d90dcb5)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, SameValueRule(fde_start, 0x3d90dcb5))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0x3d90dcb5))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SameValueRule(fde_start + 2 * code_factor, 0x3d90dcb5))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreOffsetRuleUnchanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_offset | 0x14)
+      .ULEB128(0xb6f)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler,
+              OffsetRule(fde_start, 0x14, kCFARegister, 0xb6f * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreOffsetRuleChanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_offset | 0x21)
+      .ULEB128(0xeb7)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0x21)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler,
+              OffsetRule(fde_start, 0x21, kCFARegister, 0xeb7 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0x21))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(fde_start + 2 * code_factor, 0x21,
+                                  kCFARegister, 0xeb7 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreOffsetRuleChangedOffset) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_offset | 0x21)
+      .ULEB128(0x134)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_offset | 0x21)
+      .ULEB128(0xf4f)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler,
+              OffsetRule(fde_start, 0x21, kCFARegister, 0x134 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(fde_start + code_factor, 0x21, kCFARegister,
+                                  0xf4f * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, OffsetRule(fde_start + 2 * code_factor, 0x21,
+                                  kCFARegister, 0x134 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreValOffsetRuleUnchanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_offset)
+      .ULEB128(0x829caee6)
+      .ULEB128(0xe4c)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x829caee6, kCFARegister,
+                                     0xe4c * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreValOffsetRuleChanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_offset)
+      .ULEB128(0xf17c36d6)
+      .ULEB128(0xeb7)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0xf17c36d6)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, 0xf17c36d6, kCFARegister,
+                                     0xeb7 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xf17c36d6))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start + 2 * code_factor, 0xf17c36d6,
+                                     kCFARegister, 0xeb7 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreValOffsetRuleChangedValOffset) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_offset)
+      .ULEB128(0x2cf0ab1b)
+      .ULEB128(0x562)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_val_offset)
+      .ULEB128(0x2cf0ab1b)
+      .ULEB128(0xe88)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x2cf0ab1b, kCFARegister,
+                                     0x562 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start + code_factor, 0x2cf0ab1b,
+                                     kCFARegister, 0xe88 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(fde_start + 2 * code_factor, 0x2cf0ab1b,
+                                     kCFARegister, 0x562 * data_factor))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreRegisterRuleUnchanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_register)
+      .ULEB128(0x77514acc)
+      .ULEB128(0x464de4ce)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, RegisterRule(fde_start, 0x77514acc, 0x464de4ce))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreRegisterRuleChanged) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_register)
+      .ULEB128(0xe39acce5)
+      .ULEB128(0x095f1559)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0xe39acce5)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, RegisterRule(fde_start, 0xe39acce5, 0x095f1559))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xe39acce5))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler,
+              RegisterRule(fde_start + 2 * code_factor, 0xe39acce5, 0x095f1559))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreRegisterRuleChangedRegister) {
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_register)
+      .ULEB128(0xd40e21b1)
+      .ULEB128(0x16607d6a)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(0xd40e21b1)
+      .ULEB128(0xbabb4742)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, RegisterRule(fde_start, 0xd40e21b1, 0x16607d6a))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler,
+              RegisterRule(fde_start + code_factor, 0xd40e21b1, 0xbabb4742))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler,
+              RegisterRule(fde_start + 2 * code_factor, 0xd40e21b1, 0x16607d6a))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreExpressionRuleUnchanged) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 4);
+  ImageSlice dwarf("dwarf");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_expression)
+      .ULEB128(0x666ae152)
+      .Block("dwarf")
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ExpressionRule(fde_start, 0x666ae152, dwarf))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreExpressionRuleChanged) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 4);
+  ImageSlice elf("elf");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_expression)
+      .ULEB128(0xb5ca5c46)
+      .Block(elf)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0xb5ca5c46)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ExpressionRule(fde_start, 0xb5ca5c46, elf))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xb5ca5c46))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler,
+              ExpressionRule(fde_start + 2 * code_factor, 0xb5ca5c46, elf))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreExpressionRuleChangedExpression) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 4);
+  StockCIEAndFDE(&section);
+  ImageSlice smurf("smurf");
+  ImageSlice orc("orc");
+  section.D8(lul::DW_CFA_expression)
+      .ULEB128(0x500f5739)
+      .Block(smurf)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_expression)
+      .ULEB128(0x500f5739)
+      .Block(orc)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ExpressionRule(fde_start, 0x500f5739, smurf))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ExpressionRule(fde_start + code_factor, 0x500f5739, orc))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  // Expectations are not wishes.
+  EXPECT_CALL(handler,
+              ExpressionRule(fde_start + 2 * code_factor, 0x500f5739, smurf))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreValExpressionRuleUnchanged) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 4);
+  ImageSlice hideous("hideous");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_expression)
+      .ULEB128(0x666ae152)
+      .Block(hideous)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, 0x666ae152, hideous))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreValExpressionRuleChanged) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 4);
+  ImageSlice revolting("revolting");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_expression)
+      .ULEB128(0xb5ca5c46)
+      .Block(revolting)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0xb5ca5c46)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("RestoreValExpressionRuleChanged", section);
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, 0xb5ca5c46, revolting))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xb5ca5c46))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValExpressionRule(fde_start + 2 * code_factor,
+                                         0xb5ca5c46, revolting))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+TEST_F(LulDwarfCFIRestore, RestoreValExpressionRuleChangedValExpression) {
+  ByteReader reader(ENDIANNESS_LITTLE);
+  CFISection section(kLittleEndian, 4);
+  ImageSlice repulsive("repulsive");
+  ImageSlice nauseous("nauseous");
+  StockCIEAndFDE(&section);
+  section.D8(lul::DW_CFA_val_expression)
+      .ULEB128(0x500f5739)
+      .Block(repulsive)
+      .D8(lul::DW_CFA_remember_state)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_val_expression)
+      .ULEB128(0x500f5739)
+      .Block(nauseous)
+      .D8(lul::DW_CFA_advance_loc | 1)
+      .D8(lul::DW_CFA_restore_state)
+      .FinishEntry();
+
+  PERHAPS_WRITE_DEBUG_FRAME_FILE("RestoreValExpressionRuleChangedValExpression",
+                                 section);
+
+  EXPECT_CALL(handler, ValExpressionRule(fde_start, 0x500f5739, repulsive))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler,
+              ValExpressionRule(fde_start + code_factor, 0x500f5739, nauseous))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  // Expectations are not wishes.
+  EXPECT_CALL(handler, ValExpressionRule(fde_start + 2 * code_factor,
+                                         0x500f5739, repulsive))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).WillOnce(Return(true));
+
+  ParseSection(&section, true, &reader);
+}
+
+struct EHFrameFixture : public CFIInsnFixture {
+  EHFrameFixture() : CFIInsnFixture(), section(kBigEndian, 4, true) {
+    encoded_pointer_bases.cfi = 0x7f496cb2;
+    encoded_pointer_bases.text = 0x540f67b6;
+    encoded_pointer_bases.data = 0xe3eab768;
+    section.SetEncodedPointerBases(encoded_pointer_bases);
+  }
+  CFISection section;
+  CFISection::EncodedPointerBases encoded_pointer_bases;
+
+  // Like CFIInsnFixture::ParseSection, but parses the section as
+  // .eh_frame data, supplying stock base addresses.
+  void ParseEHFrameSection(CFISection* section, bool succeeds = true) {
+    EXPECT_TRUE(section->ContainsEHFrame());
+    string contents;
+    EXPECT_TRUE(section->GetContents(&contents));
+    lul::Endianness endianness;
+    if (section->endianness() == kBigEndian)
+      endianness = ENDIANNESS_BIG;
+    else {
+      assert(section->endianness() == kLittleEndian);
+      endianness = ENDIANNESS_LITTLE;
+    }
+    ByteReader reader(endianness);
+    reader.SetAddressSize(section->AddressSize());
+    reader.SetCFIDataBase(encoded_pointer_bases.cfi, contents.data());
+    reader.SetTextBase(encoded_pointer_bases.text);
+    reader.SetDataBase(encoded_pointer_bases.data);
+    CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler,
+                         &reporter, true);
+    if (succeeds)
+      EXPECT_TRUE(parser.Start());
+    else
+      EXPECT_FALSE(parser.Start());
+  }
+};
+
+class LulDwarfEHFrame : public EHFrameFixture, public Test {};
+
+// A simple CIE, an FDE, and a terminator.
+TEST_F(LulDwarfEHFrame, Terminator) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(9968, 2466, 67, 1, "")
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(3772)
+      .ULEB128(1372)
+      .FinishEntry()
+      .FDEHeader(cie, 0x848037a1, 0x7b30475e)
+      .D8(lul::DW_CFA_set_loc)
+      .D32(0x17713850)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(5721)
+      .FinishEntry()
+      .D32(0)  // Terminate the sequence.
+      // This FDE should be ignored.
+      .FDEHeader(cie, 0xf19629fe, 0x439fb09b)
+      .FinishEntry();
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.Terminator", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x848037a1, 0x7b30475e, 1, "", 67))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(0x848037a1, kCFARegister, 3772, 1372))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(0x17713850, 5721))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+  EXPECT_CALL(reporter, EarlyEHTerminator(_)).InSequence(s).WillOnce(Return());
+
+  ParseEHFrameSection(&section);
+}
+
+// The parser should recognize the Linux Standards Base 'z' augmentations.
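+// (Informal sketch, for orientation only; the normative description is in
+// the LSB / Itanium C++ ABI documents.) A 'z' augmentation string is
+// followed in the CIE by a ULEB128 augmentation-data length and then one
+// datum per letter, in string order: 'L' is one LSDA-encoding byte, 'P' is
+// one personality-encoding byte plus an encoded personality pointer, 'R' is
+// one FDE-pointer-encoding byte, and 'S' contributes no data. So the
+// "zSLPR" CIE below carries 7 bytes: 1 ('L') + 1 + 4 ('P', with a 4-byte
+// pcrel pointer) + 1 ('R'). Each FDE then carries its own ULEB128-prefixed
+// augmentation data, here a 2-byte LSDA pointer.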
+TEST_F(LulDwarfEHFrame, SimpleFDE) {
+  lul::DwarfPointerEncoding lsda_encoding = lul::DwarfPointerEncoding(
+      lul::DW_EH_PE_indirect | lul::DW_EH_PE_datarel | lul::DW_EH_PE_sdata2);
+  lul::DwarfPointerEncoding fde_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_textrel | lul::DW_EH_PE_udata2);
+
+  section.SetPointerEncoding(fde_encoding);
+  section.SetEncodedPointerBases(encoded_pointer_bases);
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(4873, 7012, 100, 1, "zSLPR")
+      .ULEB128(7)               // Augmentation data length
+      .D8(lsda_encoding)        // LSDA pointer format
+      .D8(lul::DW_EH_PE_pcrel)  // personality pointer format
+      .EncodedPointer(0x97baa00, lul::DW_EH_PE_pcrel)  // and value
+      .D8(fde_encoding)         // FDE pointer format
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(6706)
+      .ULEB128(31)
+      .FinishEntry()
+      .FDEHeader(cie, 0x540f6b56, 0xf686)
+      .ULEB128(2)                                 // Augmentation data length
+      .EncodedPointer(0xe3eab475, lsda_encoding)  // LSDA pointer, signed
+      .D8(lul::DW_CFA_set_loc)
+      .EncodedPointer(0x540fa4ce, fde_encoding)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0x675e)
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.SimpleFDE", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x540f6b56, 0xf686, 1, "zSLPR", 100))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, PersonalityRoutine(0x97baa00, false))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, LanguageSpecificDataArea(0xe3eab475, true))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SignalHandler()).InSequence(s).WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(0x540f6b56, kCFARegister, 6706, 31))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(0x540fa4ce, 0x675e))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+// Check that we can handle an empty 'z' augmentation.
+TEST_F(LulDwarfEHFrame, EmptyZ) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(5955, 5805, 228, 1, "z")
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(3629)
+      .ULEB128(247)
+      .FinishEntry()
+      .FDEHeader(cie, 0xda007738, 0xfb55c641)
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_advance_loc1)
+      .D8(11)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(3769)
+      .FinishEntry();
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.EmptyZ", section);
+
+  EXPECT_CALL(handler, Entry(_, 0xda007738, 0xfb55c641, 1, "z", 228))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(0xda007738, kCFARegister, 3629, 247))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(0xda007738 + 11 * 5955, 3769))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+// Check that we recognize bad 'z' augmentation characters.
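+// (Informal note: an augmentation letter the parser doesn't understand
+// means it cannot know the layout of the augmentation data that follows,
+// so it must reject the whole CIE rather than guess; the test below
+// expects parsing to fail after an UnrecognizedAugmentation report.)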
+TEST_F(LulDwarfEHFrame, BadZ) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(6937, 1045, 142, 1, "zQ")
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(9006)
+      .ULEB128(7725)
+      .FinishEntry()
+      .FDEHeader(cie, 0x1293efa8, 0x236f53f2)
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_advance_loc | 12)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(5667)
+      .ULEB128(3462)
+      .FinishEntry();
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.BadZ", section);
+
+  EXPECT_CALL(reporter, UnrecognizedAugmentation(_, "zQ")).WillOnce(Return());
+
+  ParseEHFrameSection(&section, false);
+}
+
+TEST_F(LulDwarfEHFrame, zL) {
+  Label cie;
+  lul::DwarfPointerEncoding lsda_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_funcrel | lul::DW_EH_PE_udata2);
+  section.Mark(&cie)
+      .CIEHeader(9285, 9959, 54, 1, "zL")
+      .ULEB128(1)         // Augmentation data length
+      .D8(lsda_encoding)  // encoding for LSDA pointer in FDE
+      .FinishEntry()
+      .FDEHeader(cie, 0xd40091aa, 0x9aa6e746)
+      .ULEB128(2)                                 // Augmentation data length
+      .EncodedPointer(0xd40099cd, lsda_encoding)  // LSDA pointer
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zL", section);
+
+  EXPECT_CALL(handler, Entry(_, 0xd40091aa, 0x9aa6e746, 1, "zL", 54))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, LanguageSpecificDataArea(0xd40099cd, false))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+TEST_F(LulDwarfEHFrame, zP) {
+  Label cie;
+  lul::DwarfPointerEncoding personality_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_datarel | lul::DW_EH_PE_udata2);
+  section.Mark(&cie)
+      .CIEHeader(1097, 6313, 17, 1, "zP")
+      .ULEB128(3)                // Augmentation data length
+      .D8(personality_encoding)  // encoding for personality routine
+      .EncodedPointer(0xe3eaccac, personality_encoding)  // value
+      .FinishEntry()
+      .FDEHeader(cie, 0x0c8350c9, 0xbef11087)
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zP", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x0c8350c9, 0xbef11087, 1, "zP", 17))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, PersonalityRoutine(0xe3eaccac, false))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+TEST_F(LulDwarfEHFrame, zR) {
+  Label cie;
+  lul::DwarfPointerEncoding pointer_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_textrel | lul::DW_EH_PE_sdata2);
+  section.SetPointerEncoding(pointer_encoding);
+  section.Mark(&cie)
+      .CIEHeader(8011, 5496, 75, 1, "zR")
+      .ULEB128(1)            // Augmentation data length
+      .D8(pointer_encoding)  // encoding for FDE addresses
+      .FinishEntry()
+      .FDEHeader(cie, 0x540f9431, 0xbd0)
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zR", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x540f9431, 0xbd0, 1, "zR", 75))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+TEST_F(LulDwarfEHFrame, zS) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(9217, 7694, 57, 1, "zS")
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .FDEHeader(cie, 0xd40091aa, 0x9aa6e746)
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zS", section);
+
+  EXPECT_CALL(handler, Entry(_, 0xd40091aa, 0x9aa6e746, 1, "zS", 57))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SignalHandler()).InSequence(s).WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+// These tests require manual inspection of the test output.
+struct CFIReporterFixture {
+  CFIReporterFixture()
+      : reporter(gtest_logging_sink_for_LulTestDwarf, "test file name",
+                 "test section name") {}
+  CallFrameInfo::Reporter reporter;
+};
+
+class LulDwarfCFIReporter : public CFIReporterFixture, public Test {};
+
+TEST_F(LulDwarfCFIReporter, Incomplete) {
+  reporter.Incomplete(0x0102030405060708ULL, CallFrameInfo::kUnknown);
+}
+
+TEST_F(LulDwarfCFIReporter, EarlyEHTerminator) {
+  reporter.EarlyEHTerminator(0x0102030405060708ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, CIEPointerOutOfRange) {
+  reporter.CIEPointerOutOfRange(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, BadCIEId) {
+  reporter.BadCIEId(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, UnrecognizedVersion) {
+  reporter.UnrecognizedVersion(0x0123456789abcdefULL, 43);
+}
+
+TEST_F(LulDwarfCFIReporter, UnrecognizedAugmentation) {
+  reporter.UnrecognizedAugmentation(0x0123456789abcdefULL, "poodles");
+}
+
+TEST_F(LulDwarfCFIReporter, InvalidPointerEncoding) {
+  reporter.InvalidPointerEncoding(0x0123456789abcdefULL, 0x42);
+}
+
+TEST_F(LulDwarfCFIReporter, UnusablePointerEncoding) {
+  reporter.UnusablePointerEncoding(0x0123456789abcdefULL, 0x42);
+}
+
+TEST_F(LulDwarfCFIReporter, RestoreInCIE) {
+  reporter.RestoreInCIE(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, BadInstruction) {
+  reporter.BadInstruction(0x0123456789abcdefULL, CallFrameInfo::kFDE,
+                          0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, NoCFARule) {
+  reporter.NoCFARule(0x0123456789abcdefULL, CallFrameInfo::kCIE,
+                     0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, EmptyStateStack) {
+  reporter.EmptyStateStack(0x0123456789abcdefULL, CallFrameInfo::kTerminator,
+                           0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, ClearingCFARule) {
+  reporter.ClearingCFARule(0x0123456789abcdefULL, CallFrameInfo::kFDE,
+                           0xfedcba9876543210ULL);
+}
+
+class LulDwarfExpr : public Test {};
+
+class MockSummariser : public Summariser {
+ public:
+  MockSummariser() : Summariser(nullptr, 0, nullptr) {}
+  MOCK_METHOD2(Entry, void(uintptr_t, uintptr_t));
+  MOCK_METHOD0(End, void());
+  MOCK_METHOD5(Rule, void(uintptr_t, int, LExprHow, int16_t, int64_t));
+  MOCK_METHOD1(AddPfxInstr, uint32_t(PfxInstr));
+};
+
+TEST_F(LulDwarfExpr, SimpleTransliteration) {
+  MockSummariser summ;
+  ByteReader reader(ENDIANNESS_LITTLE);
+
+  CFISection section(kLittleEndian, 8);
+  section.D8(DW_OP_lit0)
+      .D8(DW_OP_lit31)
+      .D8(DW_OP_breg0 + 17)
+      .LEB128(-1234)
+      .D8(DW_OP_const4s)
+      .D32(0xFEDC9876)
+      .D8(DW_OP_deref)
+      .D8(DW_OP_and)
+      .D8(DW_OP_plus)
+      .D8(DW_OP_minus)
+      .D8(DW_OP_shl)
+      .D8(DW_OP_ge);
+  string expr;
+  bool ok = section.GetContents(&expr);
+  EXPECT_TRUE(ok);
+
+  {
+    InSequence s;
+    // required start marker
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Start, 0)));
+    // DW_OP_lit0
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, 0)));
+    // DW_OP_lit31
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, 31)));
+    // DW_OP_breg17 -1234
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_DwReg, 17)));
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, -1234)));
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Add)));
+    // DW_OP_const4s 0xFEDC9876
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, 0xFEDC9876)));
+    // DW_OP_deref
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Deref)));
+    // DW_OP_and
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_And)));
+    // DW_OP_plus
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Add)));
+    // DW_OP_minus
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Sub)));
+    // DW_OP_shl
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Shl)));
+    // DW_OP_ge
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_CmpGES)));
+    // required end marker
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_End)));
+  }
+
+  int32_t ix =
+      parseDwarfExpr(&summ, &reader, ImageSlice(expr), false, false, false);
+  EXPECT_TRUE(ix >= 0);
+}
+
+TEST_F(LulDwarfExpr, UnknownOpcode) {
+  MockSummariser summ;
+  ByteReader reader(ENDIANNESS_LITTLE);
+
+  CFISection section(kLittleEndian, 8);
+  section.D8(DW_OP_lo_user - 1);
+  string expr;
+  bool ok = section.GetContents(&expr);
+  EXPECT_TRUE(ok);
+
+  {
+    InSequence s;
+    // required start marker
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Start, 0)));
+  }
+
+  int32_t ix =
+      parseDwarfExpr(&summ, &reader, ImageSlice(expr), false, false, false);
+  EXPECT_TRUE(ix == -1);
+}
+
+TEST_F(LulDwarfExpr, ExpressionOverrun) {
+  MockSummariser summ;
+  ByteReader reader(ENDIANNESS_LITTLE);
+
+  CFISection section(kLittleEndian, 8);
+  section.D8(DW_OP_const4s).D8(0x12).D8(0x34).D8(0x56);
+  string expr;
+  bool ok = section.GetContents(&expr);
+  EXPECT_TRUE(ok);
+
+  {
+    InSequence s;
+    // required start marker
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Start, 0)));
+    // DW_OP_const4s followed by 3 (a.k.a. not enough) bytes.
+    // We expect PfxInstr(PX_SImm32, not-known-for-sure-32-bit-immediate),
+    // hence we must use _ as the argument.
+    EXPECT_CALL(summ, AddPfxInstr(_));
+  }
+
+  int32_t ix =
+      parseDwarfExpr(&summ, &reader, ImageSlice(expr), false, false, false);
+  EXPECT_TRUE(ix == -1);
+}
+
+// We'll need to mention specific Dwarf registers in the EvaluatePfxExpr tests,
+// and those names are arch-specific, so a bit of macro magic is helpful.
+#if defined(GP_ARCH_arm)
+# define TESTED_REG_STRUCT_NAME r11
+# define TESTED_REG_DWARF_NAME DW_REG_ARM_R11
+#elif defined(GP_ARCH_arm64)
+# define TESTED_REG_STRUCT_NAME x29
+# define TESTED_REG_DWARF_NAME DW_REG_AARCH64_X29
+#elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
+# define TESTED_REG_STRUCT_NAME xbp
+# define TESTED_REG_DWARF_NAME DW_REG_INTEL_XBP
+#else
+# error "Unknown plat"
+#endif
+
+struct EvaluatePfxExprFixture {
+  // Creates:
+  //   initial stack, AVMA 0x12345678, at offset 4 bytes = 0xdeadbeef
+  //   initial regs, with XBP = 0x14141356
+  //   initial CFA = 0x5432ABCD
+  EvaluatePfxExprFixture() {
+    // The test stack.
+    si.mStartAvma = 0x12345678;
+    si.mLen = 0;
+#define XX(_byte)                      \
+  do {                                 \
+    si.mContents[si.mLen++] = (_byte); \
+  } while (0)
+    XX(0x55);
+    XX(0x55);
+    XX(0x55);
+    XX(0x55);
+    if (sizeof(void*) == 8) {
+      // le64
+      XX(0xEF);
+      XX(0xBE);
+      XX(0xAD);
+      XX(0xDE);
+      XX(0);
+      XX(0);
+      XX(0);
+      XX(0);
+    } else {
+      // le32
+      XX(0xEF);
+      XX(0xBE);
+      XX(0xAD);
+      XX(0xDE);
+    }
+    XX(0xAA);
+    XX(0xAA);
+    XX(0xAA);
+    XX(0xAA);
+#undef XX
+    // The initial CFA.
+    initialCFA = TaggedUWord(0x5432ABCD);
+    // The initial register state.
+    memset(&regs, 0, sizeof(regs));
+    regs.TESTED_REG_STRUCT_NAME = TaggedUWord(0x14141356);
+  }
+
+  StackImage si;
+  TaggedUWord initialCFA;
+  UnwindRegs regs;
+};
+
+class LulDwarfEvaluatePfxExpr : public EvaluatePfxExprFixture, public Test {};
+
+TEST_F(LulDwarfEvaluatePfxExpr, NormalEvaluation) {
+  vector<PfxInstr> instrs;
+  // Put some junk at the start of the insn sequence.
+  instrs.push_back(PfxInstr(PX_End));
+  instrs.push_back(PfxInstr(PX_End));
+
+  // Now the real sequence
+  // stack is empty
+  instrs.push_back(PfxInstr(PX_Start, 1));
+  // 0x5432ABCD
+  instrs.push_back(PfxInstr(PX_SImm32, 0x31415927));
+  // 0x5432ABCD 0x31415927
+  instrs.push_back(PfxInstr(PX_DwReg, TESTED_REG_DWARF_NAME));
+  // 0x5432ABCD 0x31415927 0x14141356
+  instrs.push_back(PfxInstr(PX_SImm32, 42));
+  // 0x5432ABCD 0x31415927 0x14141356 42
+  instrs.push_back(PfxInstr(PX_Sub));
+  // 0x5432ABCD 0x31415927 0x1414132c
+  instrs.push_back(PfxInstr(PX_Add));
+  // 0x5432ABCD 0x45556c53
+  instrs.push_back(PfxInstr(PX_SImm32, si.mStartAvma + 4));
+  // 0x5432ABCD 0x45556c53 0x1234567c
+  instrs.push_back(PfxInstr(PX_Deref));
+  // 0x5432ABCD 0x45556c53 0xdeadbeef
+  instrs.push_back(PfxInstr(PX_SImm32, 0xFE01DC23));
+  // 0x5432ABCD 0x45556c53 0xdeadbeef 0xFE01DC23
+  instrs.push_back(PfxInstr(PX_And));
+  // 0x5432ABCD 0x45556c53 0xde019c23
+  instrs.push_back(PfxInstr(PX_SImm32, 7));
+  // 0x5432ABCD 0x45556c53 0xde019c23 7
+  instrs.push_back(PfxInstr(PX_Shl));
+  // 0x5432ABCD 0x45556c53 0x6f00ce1180
+  instrs.push_back(PfxInstr(PX_SImm32, 0x7fffffff));
+  // 0x5432ABCD 0x45556c53 0x6f00ce1180 7fffffff
+  instrs.push_back(PfxInstr(PX_And));
+  // 0x5432ABCD 0x45556c53 0x00ce1180
+  instrs.push_back(PfxInstr(PX_Add));
+  // 0x5432ABCD 0x46237dd3
+  instrs.push_back(PfxInstr(PX_Sub));
+  // 0xe0f2dfa
+
+  instrs.push_back(PfxInstr(PX_End));
+
+  TaggedUWord res = EvaluatePfxExpr(2 /*offset of start insn*/, &regs,
+                                    initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res.Value() == 0xe0f2dfa);
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, EmptySequence) {
+  vector<PfxInstr> instrs;
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, BogusStartPoint) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_SImm32, 42));
+  instrs.push_back(PfxInstr(PX_SImm32, 24));
+  instrs.push_back(PfxInstr(PX_SImm32, 4224));
+  TaggedUWord res = EvaluatePfxExpr(1, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, MissingEndMarker) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  instrs.push_back(PfxInstr(PX_SImm32, 24));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackUnderflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackNoUnderflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 1 /*push the initial CFA*/));
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res == initialCFA);
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackOverflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  for (int i = 0; i < 10 + 1; i++) {
+    instrs.push_back(PfxInstr(PX_SImm32, i + 100));
+  }
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackNoOverflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  for (int i = 0; i < 10 + 0; i++) {
+    instrs.push_back(PfxInstr(PX_SImm32, i + 100));
+  }
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res == TaggedUWord(109));
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, OutOfRangeShl) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  instrs.push_back(PfxInstr(PX_SImm32, 1234));
+  instrs.push_back(PfxInstr(PX_SImm32, 5678));
+  instrs.push_back(PfxInstr(PX_Shl));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(!res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, TestCmpGES) {
+  const int32_t argsL[6] = {0, 0, 1, -2, -1, -2};
+  const int32_t argsR[6] = {0, 1, 0, -2, -2, -1};
+  // expecting: t f t t t f = 101110 = 0x2E
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  // The "running total"
+  instrs.push_back(PfxInstr(PX_SImm32, 0));
+  for (unsigned int i = 0; i < sizeof(argsL) / sizeof(argsL[0]); i++) {
+    // Shift the "running total" at the bottom of the stack left by one bit
+    instrs.push_back(PfxInstr(PX_SImm32, 1));
+    instrs.push_back(PfxInstr(PX_Shl));
+    // Push both test args and do the comparison
+    instrs.push_back(PfxInstr(PX_SImm32, argsL[i]));
+    instrs.push_back(PfxInstr(PX_SImm32, argsR[i]));
+    instrs.push_back(PfxInstr(PX_CmpGES));
+    // Or the result into the running total
+    instrs.push_back(PfxInstr(PX_Or));
+  }
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res == TaggedUWord(0x2E));
+}
+
+}  // namespace lul
diff --git a/tools/profiler/tests/gtest/LulTestInfrastructure.cpp b/tools/profiler/tests/gtest/LulTestInfrastructure.cpp
new file mode 100644
index 0000000000..6d49557e9c
--- /dev/null
+++ b/tools/profiler/tests/gtest/LulTestInfrastructure.cpp
@@ -0,0 +1,498 @@
+// Copyright (c) 2010, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com> + +// Derived from: +// test_assembler.cc: Implementation of google_breakpad::TestAssembler. +// See test_assembler.h for details. + +// Derived from: +// cfi_assembler.cc: Implementation of google_breakpad::CFISection class. +// See cfi_assembler.h for details. + +#include "LulTestInfrastructure.h" + +#include "LulDwarfInt.h" + +#include <cassert> + +namespace lul_test { +namespace test_assembler { + +using std::back_insert_iterator; + +Label::Label() : value_(new Binding()) {} +Label::Label(uint64_t value) : value_(new Binding(value)) {} +Label::Label(const Label& label) { + value_ = label.value_; + value_->Acquire(); +} +Label::~Label() { + if (value_->Release()) delete value_; +} + +Label& Label::operator=(uint64_t value) { + value_->Set(NULL, value); + return *this; +} + +Label& Label::operator=(const Label& label) { + value_->Set(label.value_, 0); + return *this; +} + +Label Label::operator+(uint64_t addend) const { + Label l; + l.value_->Set(this->value_, addend); + return l; +} + +Label Label::operator-(uint64_t subtrahend) const { + Label l; + l.value_->Set(this->value_, -subtrahend); + return l; +} + +// When NDEBUG is #defined, assert doesn't evaluate its argument. This +// means you can't simply use assert to check the return value of a +// function with necessary side effects. +// +// ALWAYS_EVALUATE_AND_ASSERT(x) evaluates x regardless of whether +// NDEBUG is #defined; when NDEBUG is not #defined, it further asserts +// that x is true. +#ifdef NDEBUG +# define ALWAYS_EVALUATE_AND_ASSERT(x) x +#else +# define ALWAYS_EVALUATE_AND_ASSERT(x) assert(x) +#endif + +uint64_t Label::operator-(const Label& label) const { + uint64_t offset; + ALWAYS_EVALUATE_AND_ASSERT(IsKnownOffsetFrom(label, &offset)); + return offset; +} + +bool Label::IsKnownConstant(uint64_t* value_p) const { + Binding* base; + uint64_t addend; + value_->Get(&base, &addend); + if (base != NULL) return false; + if (value_p) *value_p = addend; + return true; +} + +bool Label::IsKnownOffsetFrom(const Label& label, uint64_t* offset_p) const { + Binding *label_base, *this_base; + uint64_t label_addend, this_addend; + label.value_->Get(&label_base, &label_addend); + value_->Get(&this_base, &this_addend); + // If this and label are related, Get will find their final + // common ancestor, regardless of how indirect the relation is. This + // comparison also handles the constant vs. constant case. 
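+  // For instance (informal illustration, mirroring the example in
+  // LulTestInfrastructure.h): after `b = a + 8` and `d = b - 2`, both d
+  // and a resolve to a's binding, so `d - a` yields 6 here even though
+  // neither label has a known value yet.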
+ if (this_base != label_base) return false; + if (offset_p) *offset_p = this_addend - label_addend; + return true; +} + +Label::Binding::Binding() : base_(this), addend_(), reference_count_(1) {} + +Label::Binding::Binding(uint64_t addend) + : base_(NULL), addend_(addend), reference_count_(1) {} + +Label::Binding::~Binding() { + assert(reference_count_ == 0); + if (base_ && base_ != this && base_->Release()) delete base_; +} + +void Label::Binding::Set(Binding* binding, uint64_t addend) { + if (!base_ && !binding) { + // We're equating two constants. This could be okay. + assert(addend_ == addend); + } else if (!base_) { + // We are a known constant, but BINDING may not be, so turn the + // tables and try to set BINDING's value instead. + binding->Set(NULL, addend_ - addend); + } else { + if (binding) { + // Find binding's final value. Since the final value is always either + // completely unconstrained or a constant, never a reference to + // another variable (otherwise, it wouldn't be final), this + // guarantees we won't create cycles here, even for code like this: + // l = m, m = n, n = l; + uint64_t binding_addend; + binding->Get(&binding, &binding_addend); + addend += binding_addend; + } + + // It seems likely that setting a binding to itself is a bug + // (although I can imagine this might turn out to be helpful to + // permit). + assert(binding != this); + + if (base_ != this) { + // Set the other bindings on our chain as well. Note that this + // is sufficient even though binding relationships form trees: + // All binding operations traverse their chains to the end, and + // all bindings related to us share some tail of our chain, so + // they will see the changes we make here. + base_->Set(binding, addend - addend_); + // We're not going to use base_ any more. + if (base_->Release()) delete base_; + } + + // Adopt BINDING as our base. Note that it should be correct to + // acquire here, after the release above, even though the usual + // reference-counting rules call for acquiring first, and then + // releasing: the self-reference assertion above should have + // complained if BINDING were 'this' or anywhere along our chain, + // so we didn't release BINDING. + if (binding) binding->Acquire(); + base_ = binding; + addend_ = addend; + } +} + +void Label::Binding::Get(Binding** base, uint64_t* addend) { + if (base_ && base_ != this) { + // Recurse to find the end of our reference chain (the root of our + // tree), and then rewrite every binding along the chain to refer + // to it directly, adjusting addends appropriately. (This is why + // this member function isn't this-const.) + Binding* final_base; + uint64_t final_addend; + base_->Get(&final_base, &final_addend); + if (final_base) final_base->Acquire(); + if (base_->Release()) delete base_; + base_ = final_base; + addend_ += final_addend; + } + *base = base_; + *addend = addend_; +} + +template <typename Inserter> +static inline void InsertEndian(test_assembler::Endianness endianness, + size_t size, uint64_t number, Inserter dest) { + assert(size > 0); + if (endianness == kLittleEndian) { + for (size_t i = 0; i < size; i++) { + *dest++ = (char)(number & 0xff); + number >>= 8; + } + } else { + assert(endianness == kBigEndian); + // The loop condition is odd, but it's correct for size_t. 
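+    // (Specifically: i is unsigned, so when the decrement wraps past zero
+    // to SIZE_MAX, `i < size` fails; the loop therefore emits exactly
+    // `size` bytes, most significant byte first.)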
+ for (size_t i = size - 1; i < size; i--) + *dest++ = (char)((number >> (i * 8)) & 0xff); + } +} + +Section& Section::Append(Endianness endianness, size_t size, uint64_t number) { + InsertEndian(endianness, size, number, + back_insert_iterator<string>(contents_)); + return *this; +} + +Section& Section::Append(Endianness endianness, size_t size, + const Label& label) { + // If this label's value is known, there's no reason to waste an + // entry in references_ on it. + uint64_t value; + if (label.IsKnownConstant(&value)) return Append(endianness, size, value); + + // This will get caught when the references are resolved, but it's + // nicer to find out earlier. + assert(endianness != kUnsetEndian); + + references_.push_back(Reference(contents_.size(), endianness, size, label)); + contents_.append(size, 0); + return *this; +} + +#define ENDIANNESS_L kLittleEndian +#define ENDIANNESS_B kBigEndian +#define ENDIANNESS(e) ENDIANNESS_##e + +#define DEFINE_SHORT_APPEND_NUMBER_ENDIAN(e, bits) \ + Section& Section::e##bits(uint##bits##_t v) { \ + InsertEndian(ENDIANNESS(e), bits / 8, v, \ + back_insert_iterator<string>(contents_)); \ + return *this; \ + } + +#define DEFINE_SHORT_APPEND_LABEL_ENDIAN(e, bits) \ + Section& Section::e##bits(const Label& v) { \ + return Append(ENDIANNESS(e), bits / 8, v); \ + } + +// Define L16, B32, and friends. +#define DEFINE_SHORT_APPEND_ENDIAN(e, bits) \ + DEFINE_SHORT_APPEND_NUMBER_ENDIAN(e, bits) \ + DEFINE_SHORT_APPEND_LABEL_ENDIAN(e, bits) + +DEFINE_SHORT_APPEND_LABEL_ENDIAN(L, 8); +DEFINE_SHORT_APPEND_LABEL_ENDIAN(B, 8); +DEFINE_SHORT_APPEND_ENDIAN(L, 16); +DEFINE_SHORT_APPEND_ENDIAN(L, 32); +DEFINE_SHORT_APPEND_ENDIAN(L, 64); +DEFINE_SHORT_APPEND_ENDIAN(B, 16); +DEFINE_SHORT_APPEND_ENDIAN(B, 32); +DEFINE_SHORT_APPEND_ENDIAN(B, 64); + +#define DEFINE_SHORT_APPEND_NUMBER_DEFAULT(bits) \ + Section& Section::D##bits(uint##bits##_t v) { \ + InsertEndian(endianness_, bits / 8, v, \ + back_insert_iterator<string>(contents_)); \ + return *this; \ + } +#define DEFINE_SHORT_APPEND_LABEL_DEFAULT(bits) \ + Section& Section::D##bits(const Label& v) { \ + return Append(endianness_, bits / 8, v); \ + } +#define DEFINE_SHORT_APPEND_DEFAULT(bits) \ + DEFINE_SHORT_APPEND_NUMBER_DEFAULT(bits) \ + DEFINE_SHORT_APPEND_LABEL_DEFAULT(bits) + +DEFINE_SHORT_APPEND_LABEL_DEFAULT(8) +DEFINE_SHORT_APPEND_DEFAULT(16); +DEFINE_SHORT_APPEND_DEFAULT(32); +DEFINE_SHORT_APPEND_DEFAULT(64); + +Section& Section::LEB128(long long value) { + while (value < -0x40 || 0x3f < value) { + contents_ += (value & 0x7f) | 0x80; + if (value < 0) + value = (value >> 7) | ~(((unsigned long long)-1) >> 7); + else + value = (value >> 7); + } + contents_ += value & 0x7f; + return *this; +} + +Section& Section::ULEB128(uint64_t value) { + while (value > 0x7f) { + contents_ += (value & 0x7f) | 0x80; + value = (value >> 7); + } + contents_ += value; + return *this; +} + +Section& Section::Align(size_t alignment, uint8_t pad_byte) { + // ALIGNMENT must be a power of two. + assert(((alignment - 1) & alignment) == 0); + size_t new_size = (contents_.size() + alignment - 1) & ~(alignment - 1); + contents_.append(new_size - contents_.size(), pad_byte); + assert((contents_.size() & (alignment - 1)) == 0); + return *this; +} + +bool Section::GetContents(string* contents) { + // For each label reference, find the label's value, and patch it into + // the section's contents. 
+ for (size_t i = 0; i < references_.size(); i++) { + Reference& r = references_[i]; + uint64_t value; + if (!r.label.IsKnownConstant(&value)) { + fprintf(stderr, "Undefined label #%zu at offset 0x%zx\n", i, r.offset); + return false; + } + assert(r.offset < contents_.size()); + assert(contents_.size() - r.offset >= r.size); + InsertEndian(r.endianness, r.size, value, contents_.begin() + r.offset); + } + contents->clear(); + std::swap(contents_, *contents); + references_.clear(); + return true; +} + +} // namespace test_assembler +} // namespace lul_test + +namespace lul_test { + +CFISection& CFISection::CIEHeader(uint64_t code_alignment_factor, + int data_alignment_factor, + unsigned return_address_register, + uint8_t version, const string& augmentation, + bool dwarf64) { + assert(!entry_length_); + entry_length_ = new PendingLength(); + in_fde_ = false; + + if (dwarf64) { + D32(kDwarf64InitialLengthMarker); + D64(entry_length_->length); + entry_length_->start = Here(); + D64(eh_frame_ ? kEHFrame64CIEIdentifier : kDwarf64CIEIdentifier); + } else { + D32(entry_length_->length); + entry_length_->start = Here(); + D32(eh_frame_ ? kEHFrame32CIEIdentifier : kDwarf32CIEIdentifier); + } + D8(version); + AppendCString(augmentation); + ULEB128(code_alignment_factor); + LEB128(data_alignment_factor); + if (version == 1) + D8(return_address_register); + else + ULEB128(return_address_register); + return *this; +} + +CFISection& CFISection::FDEHeader(Label cie_pointer, uint64_t initial_location, + uint64_t address_range, bool dwarf64) { + assert(!entry_length_); + entry_length_ = new PendingLength(); + in_fde_ = true; + fde_start_address_ = initial_location; + + if (dwarf64) { + D32(0xffffffff); + D64(entry_length_->length); + entry_length_->start = Here(); + if (eh_frame_) + D64(Here() - cie_pointer); + else + D64(cie_pointer); + } else { + D32(entry_length_->length); + entry_length_->start = Here(); + if (eh_frame_) + D32(Here() - cie_pointer); + else + D32(cie_pointer); + } + EncodedPointer(initial_location); + // The FDE length in an .eh_frame section uses the same encoding as the + // initial location, but ignores the base address (selected by the upper + // nybble of the encoding), as it's a length, not an address that can be + // made relative. + EncodedPointer(address_range, DwarfPointerEncoding(pointer_encoding_ & 0x0f)); + return *this; +} + +CFISection& CFISection::FinishEntry() { + assert(entry_length_); + Align(address_size_, lul::DW_CFA_nop); + entry_length_->length = Here() - entry_length_->start; + delete entry_length_; + entry_length_ = NULL; + in_fde_ = false; + return *this; +} + +CFISection& CFISection::EncodedPointer(uint64_t address, + DwarfPointerEncoding encoding, + const EncodedPointerBases& bases) { + // Omitted data is extremely easy to emit. + if (encoding == lul::DW_EH_PE_omit) return *this; + + // If (encoding & lul::DW_EH_PE_indirect) != 0, then we assume + // that ADDRESS is the address at which the pointer is stored --- in + // other words, that bit has no effect on how we write the pointer. + encoding = DwarfPointerEncoding(encoding & ~lul::DW_EH_PE_indirect); + + // Find the base address to which this pointer is relative. The upper + // nybble of the encoding specifies this. 
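+  // For example, with DW_EH_PE_pcrel | DW_EH_PE_sdata2 (0x1a), the upper
+  // nybble 0x10 selects the section-relative base (bases.cfi plus our
+  // current offset), and the lower nybble 0x0a makes the switch at the
+  // end of this function append the relative value as two bytes.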
+ uint64_t base; + switch (encoding & 0xf0) { + case lul::DW_EH_PE_absptr: + base = 0; + break; + case lul::DW_EH_PE_pcrel: + base = bases.cfi + Size(); + break; + case lul::DW_EH_PE_textrel: + base = bases.text; + break; + case lul::DW_EH_PE_datarel: + base = bases.data; + break; + case lul::DW_EH_PE_funcrel: + base = fde_start_address_; + break; + case lul::DW_EH_PE_aligned: + base = 0; + break; + default: + abort(); + }; + + // Make ADDRESS relative. Yes, this is appropriate even for "absptr" + // values; see gcc/unwind-pe.h. + address -= base; + + // Align the pointer, if required. + if ((encoding & 0xf0) == lul::DW_EH_PE_aligned) Align(AddressSize()); + + // Append ADDRESS to this section in the appropriate form. For the + // fixed-width forms, we don't need to differentiate between signed and + // unsigned encodings, because ADDRESS has already been extended to 64 + // bits before it was passed to us. + switch (encoding & 0x0f) { + case lul::DW_EH_PE_absptr: + Address(address); + break; + + case lul::DW_EH_PE_uleb128: + ULEB128(address); + break; + + case lul::DW_EH_PE_sleb128: + LEB128(address); + break; + + case lul::DW_EH_PE_udata2: + case lul::DW_EH_PE_sdata2: + D16(address); + break; + + case lul::DW_EH_PE_udata4: + case lul::DW_EH_PE_sdata4: + D32(address); + break; + + case lul::DW_EH_PE_udata8: + case lul::DW_EH_PE_sdata8: + D64(address); + break; + + default: + abort(); + } + + return *this; +}; + +} // namespace lul_test diff --git a/tools/profiler/tests/gtest/LulTestInfrastructure.h b/tools/profiler/tests/gtest/LulTestInfrastructure.h new file mode 100644 index 0000000000..9faa7ca858 --- /dev/null +++ b/tools/profiler/tests/gtest/LulTestInfrastructure.h @@ -0,0 +1,736 @@ +// -*- mode: C++ -*- + +// Copyright (c) 2010, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com> + +// Derived from: +// cfi_assembler.h: Define CFISection, a class for creating properly +// (and improperly) formatted DWARF CFI data for unit tests. 
+ +// Derived from: +// test-assembler.h: interface to class for building complex binary streams. + +// To test the Breakpad symbol dumper and processor thoroughly, for +// all combinations of host system and minidump processor +// architecture, we need to be able to easily generate complex test +// data like debugging information and minidump files. +// +// For example, if we want our unit tests to provide full code +// coverage for stack walking, it may be difficult to persuade the +// compiler to generate every possible sort of stack walking +// information that we want to support; there are probably DWARF CFI +// opcodes that GCC never emits. Similarly, if we want to test our +// error handling, we will need to generate damaged minidumps or +// debugging information that (we hope) the client or compiler will +// never produce on its own. +// +// google_breakpad::TestAssembler provides a predictable and +// (relatively) simple way to generate complex formatted data streams +// like minidumps and CFI. Furthermore, because TestAssembler is +// portable, developers without access to (say) Visual Studio or a +// SPARC assembler can still work on test data for those targets. + +#ifndef LUL_TEST_INFRASTRUCTURE_H +#define LUL_TEST_INFRASTRUCTURE_H + +#include "LulDwarfExt.h" + +#include <string> +#include <vector> + +using std::string; +using std::vector; + +namespace lul_test { +namespace test_assembler { + +// A Label represents a value not yet known that we need to store in a +// section. As long as all the labels a section refers to are defined +// by the time we retrieve its contents as bytes, we can use undefined +// labels freely in that section's construction. +// +// A label can be in one of three states: +// - undefined, +// - defined as the sum of some other label and a constant, or +// - a constant. +// +// A label's value never changes, but it can accumulate constraints. +// Adding labels and integers is permitted, and yields a label. +// Subtracting a constant from a label is permitted, and also yields a +// label. Subtracting two labels that have some relationship to each +// other is permitted, and yields a constant. +// +// For example: +// +// Label a; // a's value is undefined +// Label b; // b's value is undefined +// { +// Label c = a + 4; // okay, even though a's value is unknown +// b = c + 4; // also okay; b is now a+8 +// } +// Label d = b - 2; // okay; d == a+6, even though c is gone +// d.Value(); // error: d's value is not yet known +// d - a; // is 6, even though their values are not known +// a = 12; // now b == 20, and d == 18 +// d.Value(); // 18: no longer an error +// b.Value(); // 20 +// d = 10; // error: d is already defined. +// +// Label objects' lifetimes are unconstrained: notice that, in the +// above example, even though a and b are only related through c, and +// c goes out of scope, the assignment to a sets b's value as well. In +// particular, it's not necessary to ensure that a Label lives beyond +// Sections that refer to it. +class Label { + public: + Label(); // An undefined label. + explicit Label(uint64_t value); // A label with a fixed value + Label(const Label& value); // A label equal to another. + ~Label(); + + Label& operator=(uint64_t value); + Label& operator=(const Label& value); + Label operator+(uint64_t addend) const; + Label operator-(uint64_t subtrahend) const; + uint64_t operator-(const Label& subtrahend) const; + + // We could also provide == and != that work on undefined, but + // related, labels. 
+ + // Return true if this label's value is known. If VALUE_P is given, + // set *VALUE_P to the known value if returning true. + bool IsKnownConstant(uint64_t* value_p = NULL) const; + + // Return true if the offset from LABEL to this label is known. If + // OFFSET_P is given, set *OFFSET_P to the offset when returning true. + // + // You can think of l.KnownOffsetFrom(m, &d) as being like 'd = l-m', + // except that it also returns a value indicating whether the + // subtraction is possible given what we currently know of l and m. + // It can be possible even if we don't know l and m's values. For + // example: + // + // Label l, m; + // m = l + 10; + // l.IsKnownConstant(); // false + // m.IsKnownConstant(); // false + // uint64_t d; + // l.IsKnownOffsetFrom(m, &d); // true, and sets d to -10. + // l-m // -10 + // m-l // 10 + // m.Value() // error: m's value is not known + bool IsKnownOffsetFrom(const Label& label, uint64_t* offset_p = NULL) const; + + private: + // A label's value, or if that is not yet known, how the value is + // related to other labels' values. A binding may be: + // - a known constant, + // - constrained to be equal to some other binding plus a constant, or + // - unconstrained, and free to take on any value. + // + // Many labels may point to a single binding, and each binding may + // refer to another, so bindings and labels form trees whose leaves + // are labels, whose interior nodes (and roots) are bindings, and + // where links point from children to parents. Bindings are + // reference counted, allowing labels to be lightweight, copyable, + // assignable, placed in containers, and so on. + class Binding { + public: + Binding(); + explicit Binding(uint64_t addend); + ~Binding(); + + // Increment our reference count. + void Acquire() { reference_count_++; }; + // Decrement our reference count, and return true if it is zero. + bool Release() { return --reference_count_ == 0; } + + // Set this binding to be equal to BINDING + ADDEND. If BINDING is + // NULL, then set this binding to the known constant ADDEND. + // Update every binding on this binding's chain to point directly + // to BINDING, or to be a constant, with addends adjusted + // appropriately. + void Set(Binding* binding, uint64_t value); + + // Return what we know about the value of this binding. + // - If this binding's value is a known constant, set BASE to + // NULL, and set ADDEND to its value. + // - If this binding is not a known constant but related to other + // bindings, set BASE to the binding at the end of the relation + // chain (which will always be unconstrained), and set ADDEND to the + // value to add to that binding's value to get this binding's + // value. + // - If this binding is unconstrained, set BASE to this, and leave + // ADDEND unchanged. + void Get(Binding** base, uint64_t* addend); + + private: + // There are three cases: + // + // - A binding representing a known constant value has base_ NULL, + // and addend_ equal to the value. + // + // - A binding representing a completely unconstrained value has + // base_ pointing to this; addend_ is unused. + // + // - A binding whose value is related to some other binding's + // value has base_ pointing to that other binding, and addend_ + // set to the amount to add to that binding's value to get this + // binding's value. We only represent relationships of the form + // x = y+c. + // + // Thus, the bind_ links form a chain terminating in either a + // known constant value or a completely unconstrained value. 
Most + // operations on bindings do path compression: they change every + // binding on the chain to point directly to the final value, + // adjusting addends as appropriate. + Binding* base_; + uint64_t addend_; + + // The number of Labels and Bindings pointing to this binding. + // (When a binding points to itself, indicating a completely + // unconstrained binding, that doesn't count as a reference.) + int reference_count_; + }; + + // This label's value. + Binding* value_; +}; + +// Conventions for representing larger numbers as sequences of bytes. +enum Endianness { + kBigEndian, // Big-endian: the most significant byte comes first. + kLittleEndian, // Little-endian: the least significant byte comes first. + kUnsetEndian, // used internally +}; + +// A section is a sequence of bytes, constructed by appending bytes +// to the end. Sections have a convenient and flexible set of member +// functions for appending data in various formats: big-endian and +// little-endian signed and unsigned values of different sizes; +// LEB128 and ULEB128 values (see below), and raw blocks of bytes. +// +// If you need to append a value to a section that is not convenient +// to compute immediately, you can create a label, append the +// label's value to the section, and then set the label's value +// later, when it's convenient to do so. Once a label's value is +// known, the section class takes care of updating all previously +// appended references to it. +// +// Once all the labels to which a section refers have had their +// values determined, you can get a copy of the section's contents +// as a string. +// +// Note that there is no specified "start of section" label. This is +// because there are typically several different meanings for "the +// start of a section": the offset of the section within an object +// file, the address in memory at which the section's content appear, +// and so on. It's up to the code that uses the Section class to +// keep track of these explicitly, as they depend on the application. +class Section { + public: + explicit Section(Endianness endianness = kUnsetEndian) + : endianness_(endianness){}; + + // A base class destructor should be either public and virtual, + // or protected and nonvirtual. + virtual ~Section(){}; + + // Return the default endianness of this section. + Endianness endianness() const { return endianness_; } + + // Append the SIZE bytes at DATA to the end of this section. Return + // a reference to this section. + Section& Append(const string& data) { + contents_.append(data); + return *this; + }; + + // Append data from SLICE to the end of this section. Return + // a reference to this section. + Section& Append(const lul::ImageSlice& slice) { + for (size_t i = 0; i < slice.length_; i++) { + contents_.append(1, slice.start_[i]); + } + return *this; + } + + // Append data from CSTRING to the end of this section. The terminating + // zero is not included. Return a reference to this section. + Section& Append(const char* cstring) { + for (size_t i = 0; cstring[i] != '\0'; i++) { + contents_.append(1, cstring[i]); + } + return *this; + } + + // Append SIZE copies of BYTE to the end of this section. Return a + // reference to this section. + Section& Append(size_t size, uint8_t byte) { + contents_.append(size, (char)byte); + return *this; + } + + // Append NUMBER to this section. ENDIANNESS is the endianness to + // use to write the number. SIZE is the length of the number in + // bytes. Return a reference to this section. 
+
+  // Append NUMBER to this section. ENDIANNESS is the endianness to
+  // use to write the number. SIZE is the length of the number in
+  // bytes. Return a reference to this section.
+  Section& Append(Endianness endianness, size_t size, uint64_t number);
+  Section& Append(Endianness endianness, size_t size, const Label& label);
+
+  // Append SECTION to the end of this section. The labels SECTION
+  // refers to need not be defined yet.
+  //
+  // Note that this has no effect on any Labels' values, or on
+  // SECTION. If placing SECTION within 'this' provides new
+  // constraints on existing labels' values, then it's up to the
+  // caller to fiddle with those labels as needed.
+  Section& Append(const Section& section);
+
+  // Append the contents of DATA as a series of bytes terminated by
+  // a NULL character.
+  Section& AppendCString(const string& data) {
+    Append(data);
+    contents_ += '\0';
+    return *this;
+  }
+
+  // Append VALUE or LABEL to this section, with the given bit width and
+  // endianness. Return a reference to this section.
+  //
+  // The names of these functions have the form <ENDIANNESS><BITWIDTH>:
+  // <ENDIANNESS> is either 'L' (little-endian, least significant byte first),
+  //              'B' (big-endian, most significant byte first), or
+  //              'D' (default, the section's default endianness)
+  // <BITWIDTH> is 8, 16, 32, or 64.
+  //
+  // Since endianness doesn't matter for a single byte, all the
+  // <BITWIDTH>=8 functions are equivalent.
+  //
+  // These can be used to write both signed and unsigned values, as
+  // the compiler will properly sign-extend a signed value before
+  // passing it to the function, at which point the function's
+  // behavior is the same either way.
+  Section& L8(uint8_t value) {
+    contents_ += value;
+    return *this;
+  }
+  Section& B8(uint8_t value) {
+    contents_ += value;
+    return *this;
+  }
+  Section& D8(uint8_t value) {
+    contents_ += value;
+    return *this;
+  }
+  Section &L16(uint16_t), &L32(uint32_t), &L64(uint64_t), &B16(uint16_t),
+      &B32(uint32_t), &B64(uint64_t), &D16(uint16_t), &D32(uint32_t),
+      &D64(uint64_t);
+  Section &L8(const Label& label), &L16(const Label& label),
+      &L32(const Label& label), &L64(const Label& label),
+      &B8(const Label& label), &B16(const Label& label),
+      &B32(const Label& label), &B64(const Label& label),
+      &D8(const Label& label), &D16(const Label& label),
+      &D32(const Label& label), &D64(const Label& label);
+
+  // Append VALUE in a signed LEB128 (Little-Endian Base 128) form.
+  //
+  // The signed LEB128 representation of an integer N is a variable
+  // number of bytes:
+  //
+  // - If N is between -0x40 and 0x3f, then its signed LEB128
+  //   representation is a single byte whose value is N & 0x7f (that
+  //   is, N in seven-bit two's complement).
+  //
+  // - Otherwise, its signed LEB128 representation is (N & 0x7f) |
+  //   0x80, followed by the signed LEB128 representation of N / 128,
+  //   rounded towards negative infinity.
+  //
+  // In other words, we break VALUE into groups of seven bits, put
+  // them in little-endian order, and then write them as eight-bit
+  // bytes with the high bit on all but the last.
+  //
+  // Note that VALUE cannot be a Label (we would have to implement
+  // relaxation).
+  Section& LEB128(long long value);
+
+  // Append VALUE in unsigned LEB128 (Little-Endian Base 128) form.
+  //
+  // The unsigned LEB128 representation of an integer N is a variable
+  // number of bytes:
+  //
+  // - If N is between 0 and 0x7f, then its unsigned LEB128
+  //   representation is a single byte whose value is N.
+  //
+  // - Otherwise, its unsigned LEB128 representation is (N & 0x7f) |
+  //   0x80, followed by the unsigned LEB128 representation of N /
+  //   128, rounded towards negative infinity.
+  //
+  // Note that VALUE cannot be a Label (we would have to implement
+  // relaxation).
+  Section& ULEB128(uint64_t value);
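+
+  // Worked examples of the rules above (illustrative only):
+  //
+  //   ULEB128(300) appends the bytes 0xac 0x02: the low seven bits of
+  //   300 are 0x2c, written as 0xac with the continuation bit set; the
+  //   remaining bits are 2, written as the final byte 0x02.
+  //
+  //   LEB128(-129) appends the bytes 0xff 0x7e: -129 & 0x7f is 0x7f,
+  //   written as 0xff with the continuation bit set; -129 / 128 rounded
+  //   towards negative infinity is -2, which fits in the final byte as
+  //   0x7e (seven-bit two's complement).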
+
+  // Jump to the next location aligned on an ALIGNMENT-byte boundary,
+  // relative to the start of the section. Fill the gap with PAD_BYTE.
+  // ALIGNMENT must be a power of two. Return a reference to this
+  // section.
+  Section& Align(size_t alignment, uint8_t pad_byte = 0);
+
+  // Return the current size of the section.
+  size_t Size() const { return contents_.size(); }
+
+  // Return a label representing the start of the section.
+  //
+  // It is up to the user whether this label represents the section's
+  // position in an object file, the section's address in memory, or
+  // what have you; some applications may need both, in which case
+  // this simple-minded interface won't be enough. This class only
+  // provides a single start label, for use with the Here and Mark
+  // member functions.
+  //
+  // Ideally, we'd provide this in a subclass that actually knows more
+  // about the application at hand and can provide an appropriate
+  // collection of start labels. But then the appending member
+  // functions like Append and D32 would return a reference to the
+  // base class, not the derived class, and the chaining won't work.
+  // Since the only value here is in pretty notation, that's a fatal
+  // flaw.
+  Label start() const { return start_; }
+
+  // Return a label representing the point at which the next Appended
+  // item will appear in the section, relative to start().
+  Label Here() const { return start_ + Size(); }
+
+  // Set *LABEL to Here, and return a reference to this section.
+  Section& Mark(Label* label) {
+    *label = Here();
+    return *this;
+  }
+
+  // If there are no undefined label references left in this
+  // section, set CONTENTS to the contents of this section, as a
+  // string, and clear this section. Return true on success, or false
+  // if there were still undefined labels.
+  bool GetContents(string* contents);
+
+ private:
+  // Used internally. A reference to a label's value.
+  struct Reference {
+    Reference(size_t set_offset, Endianness set_endianness, size_t set_size,
+              const Label& set_label)
+        : offset(set_offset),
+          endianness(set_endianness),
+          size(set_size),
+          label(set_label) {}
+
+    // The offset of the reference within the section.
+    size_t offset;
+
+    // The endianness of the reference.
+    Endianness endianness;
+
+    // The size of the reference.
+    size_t size;
+
+    // The label to which this is a reference.
+    Label label;
+  };
+
+  // The default endianness of this section.
+  Endianness endianness_;
+
+  // The contents of the section.
+  string contents_;
+
+  // References to labels within those contents.
+  vector<Reference> references_;
+
+  // A label referring to the beginning of the section.
+  Label start_;
+};
+
+}  // namespace test_assembler
+}  // namespace lul_test
+
+namespace lul_test {
+
+using lul::DwarfPointerEncoding;
+using lul_test::test_assembler::Endianness;
+using lul_test::test_assembler::Label;
+using lul_test::test_assembler::Section;
+
+class CFISection : public Section {
+ public:
+  // CFI augmentation strings beginning with 'z', defined by the
+  // Linux/IA-64 C++ ABI, can specify interesting encodings for
+  // addresses appearing in FDE headers and call frame instructions (and
+  // for additional fields whose presence the augmentation string
+  // specifies). In particular, pointers can be specified to be relative
+  // to various base addresses: the start of the .text section,
+  // the location holding the address itself, and so on. These allow the
+  // frame data to be position-independent even when they live in
+  // write-protected pages. These variants are specified at the
+  // following two URLs:
+  //
+  // http://refspecs.linux-foundation.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/dwarfext.html
+  // http://refspecs.linux-foundation.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html
+  //
+  // CFISection leaves the production of well-formed 'z'-augmented CIEs and
+  // FDEs to the user, but does provide EncodedPointer, to emit
+  // properly-encoded addresses for a given pointer encoding.
+  // EncodedPointer uses an instance of this structure to find the base
+  // addresses it should use; you can establish a default for all encoded
+  // pointers appended to this section with SetEncodedPointerBases.
+  struct EncodedPointerBases {
+    EncodedPointerBases() : cfi(), text(), data() {}
+
+    // The starting address of this CFI section in memory, for
+    // DW_EH_PE_pcrel. DW_EH_PE_pcrel pointers may only be used in data
+    // that is loaded into the program's address space.
+    uint64_t cfi;
+
+    // The starting address of this file's .text section, for DW_EH_PE_textrel.
+    uint64_t text;
+
+    // The starting address of this file's .got or .eh_frame_hdr section,
+    // for DW_EH_PE_datarel.
+    uint64_t data;
+  };
+
+  // Create a CFISection whose endianness is ENDIANNESS, and where
+  // machine addresses are ADDRESS_SIZE bytes long. If EH_FRAME is
+  // true, use the .eh_frame format, as described by the Linux
+  // Standards Base Core Specification, instead of the DWARF CFI
+  // format.
+  CFISection(Endianness endianness, size_t address_size, bool eh_frame = false)
+      : Section(endianness),
+        address_size_(address_size),
+        eh_frame_(eh_frame),
+        pointer_encoding_(lul::DW_EH_PE_absptr),
+        encoded_pointer_bases_(),
+        entry_length_(NULL),
+        in_fde_(false) {
+    // The 'start', 'Here', and 'Mark' members of a CFISection all refer
+    // to section offsets.
+    start() = 0;
+  }
+
+  // Return this CFISection's address size.
+  size_t AddressSize() const { return address_size_; }
+
+  // Return true if this CFISection uses the .eh_frame format, or
+  // false if it contains ordinary DWARF CFI data.
+  bool ContainsEHFrame() const { return eh_frame_; }
+
+  // Use ENCODING for pointers in calls to FDEHeader and EncodedPointer.
+  void SetPointerEncoding(DwarfPointerEncoding encoding) {
+    pointer_encoding_ = encoding;
+  }
+
+  // Use the addresses in BASES as the base addresses for encoded
+  // pointers in subsequent calls to FDEHeader or EncodedPointer.
+  // This function makes a copy of BASES.
+  void SetEncodedPointerBases(const EncodedPointerBases& bases) {
+    encoded_pointer_bases_ = bases;
+  }
+
+  // Append a Common Information Entry header to this section with the
+  // given values. If dwarf64 is true, use the 64-bit DWARF initial
+  // length format for the CIE's initial length. Return a reference to
+  // this section. You should call FinishEntry after writing the last
+  // instruction for the CIE.
+  //
+  // Before calling this function, you will typically want to use Mark
+  // or Here to make a label to pass to FDEHeader that refers to this
+  // CIE's position in the section.
+  CFISection& CIEHeader(uint64_t code_alignment_factor,
+                        int data_alignment_factor,
+                        unsigned return_address_register, uint8_t version = 3,
+                        const string& augmentation = "", bool dwarf64 = false);
+
+  // Append a Frame Description Entry header to this section with the
+  // given values. If dwarf64 is true, use the 64-bit DWARF initial
+  // length format for the FDE's initial length. Return a reference to
+  // this section. You should call FinishEntry after writing the last
+  // instruction for the FDE.
+  //
+  // This function doesn't support entries that are longer than
+  // 0xffffff00 bytes. (The "initial length" is always a 32-bit
+  // value.) Nor does it support .debug_frame sections longer than
+  // 0xffffff00 bytes.
+  CFISection& FDEHeader(Label cie_pointer, uint64_t initial_location,
+                        uint64_t address_range, bool dwarf64 = false);
+
+  // Note the current position as the end of the last CIE or FDE we
+  // started, after padding with DW_CFA_nops for alignment. This
+  // defines the label representing the entry's length, cited in the
+  // entry's header. Return a reference to this section.
+  CFISection& FinishEntry();
+
+  // Append the contents of BLOCK as a DW_FORM_block value: an
+  // unsigned LEB128 length, followed by that many bytes of data.
+  CFISection& Block(const lul::ImageSlice& block) {
+    ULEB128(block.length_);
+    Append(block);
+    return *this;
+  }
+
+  // Append data from CSTRING as a DW_FORM_block value: an unsigned LEB128
+  // length, followed by that many bytes of data. The terminating zero is not
+  // included.
+  CFISection& Block(const char* cstring) {
+    ULEB128(strlen(cstring));
+    Append(cstring);
+    return *this;
+  }
+
+  // Append ADDRESS to this section, in the appropriate size and
+  // endianness. Return a reference to this section.
+  CFISection& Address(uint64_t address) {
+    Section::Append(endianness(), address_size_, address);
+    return *this;
+  }
+
+  // Append ADDRESS to this section, using ENCODING and BASES. ENCODING
+  // defaults to this section's default encoding, established by
+  // SetPointerEncoding. BASES defaults to this section's bases, set by
+  // SetEncodedPointerBases. If the DW_EH_PE_indirect bit is set in the
+  // encoding, assume that ADDRESS is where the true address is stored.
+  // Return a reference to this section.
+  //
+  // (C++ doesn't let me use default arguments here, because I want to
+  // refer to members of *this in the default argument expression.)
+  CFISection& EncodedPointer(uint64_t address) {
+    return EncodedPointer(address, pointer_encoding_, encoded_pointer_bases_);
+  }
+  CFISection& EncodedPointer(uint64_t address, DwarfPointerEncoding encoding) {
+    return EncodedPointer(address, encoding, encoded_pointer_bases_);
+  }
+  CFISection& EncodedPointer(uint64_t address, DwarfPointerEncoding encoding,
+                             const EncodedPointerBases& bases);
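+
+  // An illustrative example (the addresses are hypothetical): after
+  //
+  //   SetPointerEncoding(DwarfPointerEncoding(lul::DW_EH_PE_pcrel |
+  //                                           lul::DW_EH_PE_sdata4));
+  //   EncodedPointerBases bases;
+  //   bases.cfi = 0x3000;
+  //   SetEncodedPointerBases(bases);
+  //
+  // a subsequent EncodedPointer(0x3100) appends a four-byte signed
+  // value expressing 0x3100 relative to where the pointer itself falls
+  // in the loaded section (bases.cfi plus the current offset).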
+
+  // Restate some member functions, to keep chaining working nicely.
+  CFISection& Mark(Label* label) {
+    Section::Mark(label);
+    return *this;
+  }
+  CFISection& D8(uint8_t v) {
+    Section::D8(v);
+    return *this;
+  }
+  CFISection& D16(uint16_t v) {
+    Section::D16(v);
+    return *this;
+  }
+  CFISection& D16(const Label& v) {
+    Section::D16(v);
+    return *this;
+  }
+  CFISection& D32(uint32_t v) {
+    Section::D32(v);
+    return *this;
+  }
+  CFISection& D32(const Label& v) {
+    Section::D32(v);
+    return *this;
+  }
+  CFISection& D64(uint64_t v) {
+    Section::D64(v);
+    return *this;
+  }
+  CFISection& D64(const Label& v) {
+    Section::D64(v);
+    return *this;
+  }
+  CFISection& LEB128(long long v) {
+    Section::LEB128(v);
+    return *this;
+  }
+  CFISection& ULEB128(uint64_t v) {
+    Section::ULEB128(v);
+    return *this;
+  }
+
+ private:
+  // A length value that we've appended to the section, but is not yet
+  // known. LENGTH is the appended value; START is a label referring
+  // to the start of the data whose length was cited.
+  struct PendingLength {
+    Label length;
+    Label start;
+  };
+
+  // Constants used in CFI/.eh_frame data:
+
+  // If the first four bytes of an "initial length" are this constant, then
+  // the data uses the 64-bit DWARF format, and the length itself is the
+  // subsequent eight bytes.
+  static const uint32_t kDwarf64InitialLengthMarker = 0xffffffffU;
+
+  // The CIE identifier for 32- and 64-bit DWARF CFI and .eh_frame data.
+  static const uint32_t kDwarf32CIEIdentifier = ~(uint32_t)0;
+  static const uint64_t kDwarf64CIEIdentifier = ~(uint64_t)0;
+  static const uint32_t kEHFrame32CIEIdentifier = 0;
+  static const uint64_t kEHFrame64CIEIdentifier = 0;
+
+  // The size of a machine address for the data in this section.
+  size_t address_size_;
+
+  // If true, we are generating a Linux .eh_frame section, instead of
+  // a standard DWARF .debug_frame section.
+  bool eh_frame_;
+
+  // The encoding to use for FDE pointers.
+  DwarfPointerEncoding pointer_encoding_;
+
+  // The base addresses to use when emitting encoded pointers.
+  EncodedPointerBases encoded_pointer_bases_;
+
+  // The length value for the current entry.
+  //
+  // Oddly, this must be dynamically allocated. Labels never get new
+  // values; they only acquire constraints on the value they already
+  // have, or assert if you assign them something incompatible. So
+  // each header needs truly fresh Label objects to cite in their
+  // headers and track their positions. The alternative is explicit
+  // destructor invocation and a placement new. Ick.
+  PendingLength* entry_length_;
+
+  // True if we are currently emitting an FDE --- that is, we have
+  // called FDEHeader but have not yet called FinishEntry.
+  bool in_fde_;
+
+  // If in_fde_ is true, this is its starting address. We use this for
+  // emitting DW_EH_PE_funcrel pointers.
+  uint64_t fde_start_address_;
+};
+
+}  // namespace lul_test
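+
+// A minimal end-to-end sketch of how these classes combine (illustrative
+// only; a real test must still emit valid call frame instructions between
+// the headers, and the constants here are hypothetical):
+//
+//   using namespace lul_test;
+//   CFISection section(test_assembler::kLittleEndian, 8);
+//   Label cie;
+//   section
+//       .Mark(&cie)
+//       .CIEHeader(1, -8, 16)            // alignments and RA register
+//       .FinishEntry()
+//       .FDEHeader(cie, 0x1000, 0x100)   // covers [0x1000, 0x1100)
+//       .FinishEntry();
+//   string contents;
+//   bool ok = section.GetContents(&contents);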
+#endif  // LUL_TEST_INFRASTRUCTURE_H
diff --git a/tools/profiler/tests/gtest/ThreadProfileTest.cpp b/tools/profiler/tests/gtest/ThreadProfileTest.cpp
new file mode 100644
index 0000000000..b8a15c39b2
--- /dev/null
+++ b/tools/profiler/tests/gtest/ThreadProfileTest.cpp
@@ -0,0 +1,60 @@
+
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef MOZ_GECKO_PROFILER
+
+#  include "ProfileBuffer.h"
+
+#  include "mozilla/PowerOfTwo.h"
+#  include "mozilla/ProfileBufferChunkManagerWithLocalLimit.h"
+#  include "mozilla/ProfileChunkedBuffer.h"
+
+#  include "gtest/gtest.h"
+
+// Make sure we can record one entry and read it
+TEST(ThreadProfile, InsertOneEntry)
+{
+  mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager(
+      2 * (1 + uint32_t(sizeof(ProfileBufferEntry))) * 4,
+      2 * (1 + uint32_t(sizeof(ProfileBufferEntry))));
+  mozilla::ProfileChunkedBuffer profileChunkedBuffer(
+      mozilla::ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager);
+  auto pb = mozilla::MakeUnique<ProfileBuffer>(profileChunkedBuffer);
+  pb->AddEntry(ProfileBufferEntry::Time(123.1));
+  ProfileBufferEntry entry = pb->GetEntry(pb->BufferRangeStart());
+  ASSERT_TRUE(entry.IsTime());
+  ASSERT_EQ(123.1, entry.GetDouble());
+}
+
+// See if we can insert some entries
+TEST(ThreadProfile, InsertEntriesNoWrap)
+{
+  mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager(
+      100 * (1 + uint32_t(sizeof(ProfileBufferEntry))),
+      100 * (1 + uint32_t(sizeof(ProfileBufferEntry))) / 4);
+  mozilla::ProfileChunkedBuffer profileChunkedBuffer(
+      mozilla::ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager);
+  auto pb = mozilla::MakeUnique<ProfileBuffer>(profileChunkedBuffer);
+  const int test_size = 50;
+  for (int i = 0; i < test_size; i++) {
+    pb->AddEntry(ProfileBufferEntry::Time(i));
+  }
+  int times = 0;
+  uint64_t readPos = pb->BufferRangeStart();
+  while (readPos != pb->BufferRangeEnd()) {
+    ProfileBufferEntry entry = pb->GetEntry(readPos);
+    readPos++;
+    if (entry.GetKind() == ProfileBufferEntry::Kind::INVALID) {
+      continue;
+    }
+    ASSERT_TRUE(entry.IsTime());
+    ASSERT_EQ(times, entry.GetDouble());
+    times++;
+  }
+  ASSERT_EQ(test_size, times);
+}
+
+#endif  // MOZ_GECKO_PROFILER
diff --git a/tools/profiler/tests/gtest/moz.build b/tools/profiler/tests/gtest/moz.build
new file mode 100644
index 0000000000..4eb1fef762
--- /dev/null
+++ b/tools/profiler/tests/gtest/moz.build
@@ -0,0 +1,45 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+if (
+    CONFIG["MOZ_GECKO_PROFILER"]
+    and CONFIG["OS_TARGET"] in ("Android", "Linux")
+    and CONFIG["CPU_ARCH"]
+    in (
+        "arm",
+        "aarch64",
+        "x86",
+        "x86_64",
+    )
+):
+    UNIFIED_SOURCES += [
+        "LulTest.cpp",
+        "LulTestDwarf.cpp",
+        "LulTestInfrastructure.cpp",
+    ]
+
+LOCAL_INCLUDES += [
+    "/netwerk/base",
+    "/netwerk/protocol/http",
+    "/toolkit/components/jsoncpp/include",
+    "/tools/profiler/core",
+    "/tools/profiler/gecko",
+    "/tools/profiler/lul",
+]
+
+if CONFIG["OS_TARGET"] != "Android":
+    UNIFIED_SOURCES += [
+        "GeckoProfiler.cpp",
+        "ThreadProfileTest.cpp",
+    ]
+
+USE_LIBS += [
+    "jsoncpp",
+]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+FINAL_LIBRARY = "xul-gtest"
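For reference, once registered in moz.build as above, these gtests can typically be run locally through mach's gtest wrapper, for example "./mach gtest ThreadProfile.*" or "./mach gtest GeckoProfiler.*" (this assumes a standard Firefox build with the profiler enabled; the exact invocation may vary by setup).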